def add_template_from_json(self, template, basename, version):
     new_tpl = AttrDict(template.copy())
     new_tpl.name = '{}_v{}'.format(basename, version)
     
     # copy old template directory
     tpl_dir = app.config['UPLOADED_TEMPLATES_DEST']
     src = os.path.join(tpl_dir, template.name)
     dst = os.path.join(tpl_dir, new_tpl.name)
     shutil.copytree(src, dst)
     #old_tpl = os.path.join(tpl_dir, 'template', 'template.xml')
     #if os.path.exists(old_tpl):
         #os.remove(old_tpl) # old xml is obsolete
     
     # genericize the template
     def visit(path, key, value):
         if key in set(['cr_date']):
             return False
         elif key in set(['id', 'template_id', 'type_id', 'attr_id']):
             return key, None            
         return key, value
     new_tpl = remap(dict(new_tpl), visit=visit)
     
     new_template = self.call('add_template', {'tmpl': new_tpl})
     
     return new_template
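A minimal sketch of the remap/visit pattern used above, assuming the remap in scope is boltons.iterutils.remap (an assumption based on the visit signature); the template data here is invented:

from boltons.iterutils import remap

tpl = {'name': 'demo_v2', 'id': 7, 'cr_date': '2020-01-01',
       'types': [{'type_id': 3, 'attrs': [{'attr_id': 9}]}]}

def visit(path, key, value):
    if key == 'cr_date':
        return False        # returning False drops the item entirely
    elif key in ('id', 'template_id', 'type_id', 'attr_id'):
        return key, None    # keep the key but null the server-side id
    return key, value       # everything else passes through unchanged

print(remap(tpl, visit=visit))
# {'name': 'demo_v2', 'id': None, 'types': [{'type_id': None, 'attrs': [{'attr_id': None}]}]}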
Example #2
    def test_setattr(self):
        """
        Test that key-value pairs can be added/changed as attributes.
        """
        from attrdict import AttrDict

        adict = AttrDict({'foo': 'bar'})

        adict.foo = 'baz'
        self.assertEqual(adict.foo, 'baz')
        self.assertEqual(adict['foo'], 'baz')

        adict.lorem = 'ipsum'
        self.assertEqual(adict.lorem, 'ipsum')
        self.assertEqual(adict['lorem'], 'ipsum')

        adict.alpha = {'beta': 1, 'bravo': 2}
        self.assertEqual(adict.alpha, {'beta': 1, 'bravo': 2})
        self.assertEqual(adict.alpha.beta, 1)
        self.assertEqual(adict['alpha'], {'beta': 1, 'bravo': 2})

        # with self.assertRaises(TypeError):
        try:
            adict._no = "Won't work"
        except TypeError:
            pass  # expected
        else:
            raise AssertionError("Exception not thrown")
Example #3
def BuildSurprise(hist_times, t_before, base_rate, maxbin, surpmax, surpmin):

    # hist_times: the array of spike times for each whisker and direction, relative to the stimulus
    # t_before:   how long before the stimulus the PSTH bins start (all 1 msec)
    # base_rate:  rate of a 1 ms bin of the blank PSTH
    # maxbin:     the number of bin sizes at which we compute the surprise, from 1 to maxbin
    # surpmax:    up to what time we compute the surprise (typically 55 ms)
    # surpmin:    the time from which we start looking for a response (typically over 5 ms)
    # (blankw -- the blank whisker number, experiment-dependent -- and nsizeth -- how many
    # consecutive responsive bins are required, typically 2 -- belong to the significance step, not here)
    
    # we save everything in this variable as an attr dictionary
    Surprise = dict()
    #-------------------------------------
    # here computing the surprise (from surpmin to surpmax in ms)
    SurpriseW = {}
   
    for w in np.arange(25):
        Surprisefixbin = {}  # surprise per bin size for this whisker (SurpriseW below collects all 25 whiskers)
        
        for binsize in np.arange(maxbin)+1:
            Surprisefixbin[binsize] = BuildSingleSurprise(hist_times[w],base_rate,binsize,surpmax,t_before)
        
        SurpriseW[w] = Surprisefixbin
            
    Surprise = AttrDict({'Data' : SurpriseW})
    Surprise.update({'logic_tree_data': '[whiskers][binsizes][direction][values] = 25x20x2x'+ str(surpmax-surpmin)})

    return Surprise    
Example #4
def parse_thread_page(el: bs4.element.Tag) -> AttrDict:
    out = AttrDict()
    out.user = el.select('.postprofile dt')[0].text.strip()
    out.body_html = str(el.select('.content')[0]).strip()
    out.body_text = el.select('.content')[0].text.strip()
    out.date = el.select('.postbody .author')[0].text.strip()
    return out
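A hedged usage sketch for parse_thread_page, run against a hand-written fragment that mimics the phpBB-style markup the selectors above expect (the markup here is invented for illustration):

import bs4

html = """
<div class="post">
  <dl class="postprofile"><dt>alice</dt></dl>
  <div class="postbody"><p class="author">Mon Jan 01, 2024 10:00 am</p></div>
  <div class="content">Hello <b>world</b></div>
</div>
"""
post = bs4.BeautifulSoup(html, 'html.parser').select('.post')[0]
parsed = parse_thread_page(post)
print(parsed.user)       # 'alice' -- AttrDict allows attribute access
print(parsed.body_text)  # 'Hello world'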
Example #5
    def search(self, title):
        log.debug("Searching IMDb for '{}'", title)
        results = self.imdb.search_for_title(title)

        print("Results:")
        for i, movie in enumerate(results):
            print("%s: %s (%s)" % (i, movie['title'], movie['year']))

        while True:
            choice = input('Select number or enter an alternate'
                           ' search term: [0-%s, 0 default] ' %
                           (len(results) - 1))
            try:
                choice = int(choice)
            except ValueError:
                if choice:
                    return self.search(choice)
                choice = 0

            try:
                result = results[choice]
            except IndexError:
                pass
            else:
                imdb_id = result['imdb_id']
                log.debug("Found IMDb item {}", imdb_id)
                movie = AttrDict(self.imdb.get_title(imdb_id))
                movie.credits = self.imdb.get_title_credits(imdb_id)['credits']
                movie.genres = self.imdb.get_title_genres(imdb_id)['genres']
                return ImdbResult(movie)
Example #6
    def test_len(self):
        """
        Test that len works properly.
        """
        from attrdict import AttrDict

        # empty
        adict = AttrDict()
        self.assertEqual(len(adict), 0)

        # added via key
        adict['key'] = 1
        self.assertEqual(len(adict), 1)

        adict['key'] = 2
        self.assertEqual(len(adict), 1)

        # added via attribute
        adict.attribute = 3
        self.assertEqual(len(adict), 2)

        adict.key = 3
        self.assertEqual(len(adict), 2)

        # deleted
        del adict.key
        self.assertEqual(len(adict), 1)
Example #7
    def __init__(self, response):
        self.raw = response
        self.name = response["name"]
        self.version = response["version"]
        self.description = response["description"]
        self.preseeds = response["preseeds"]

        AttrDict.__init__(self)
Example #8
def test_pop_removes_attr():
    d = AttrDict(k=True)
    try:
        d.k
    except AttributeError:
        assert False, "AttributeError shouldn't be raised yet"
    d.pop('k')
    try:
        d.k
    except AttributeError:
        pass  # expected: pop also removed the attribute
    else:
        raise AssertionError("Exception not thrown")
Example #9
def test_pop_removes_item():
    d = AttrDict(k=True)
    try:
        d['k']
    except KeyError:
        assert False, "KeyError shouldn't be raised yet"
    d.pop('k')
    try:
        d['k']
    except KeyError:
        pass  # expected: pop removed the item
    else:
        raise AssertionError("Exception not thrown")
Example #10
def BuildPSTH(Stims, Spikes, sampling_freq, exp, meas):
    
    stimtimes = {}
    stim_samp = 1/.0009997575757
    # make an 'output dict'
    # the PSTH will be built on -tbefore:tafter
    PSTH_times = {}
    
    # Loop each neuron and get the spikes.
    for neuron in list(Spikes.keys()): 
        codename = 'exp'+ str(exp) + '_' + str(meas) + '_c' + str(neuron)
        psth = AttrDict({'clusnum': neuron,'exp' : int(exp) , 'meas': int(meas[1]) , 'shank': int(meas[3])})
        psth.update(AttrDict({'psth_counts': [] , 'psth_times': []}))
        
        histo= build_hist_dict()
        spikes = Spikes[neuron].spike_times*1000 #(want them in ms)
        
        #loop episodes and stims_per_episode, and populate the histograms
        for ep in np.arange(Stims.episodes)[:]:
            if ep<30:
                stims = 82
            else:
                stims = 28

            #print('Episode: ',ep)
            
            stims = 8  # NOTE: overrides the per-episode stim count computed above
            for se in np.arange(stims):#np.arange(Stims.stims_ep):
                #print('se     :',se)
                code = str(int(Stims.st_ctrl[ep][se]))
                c = str(Stims.st_logic.ctrl[code])
                                
                if code=='0':
                    t_after=500
                    start = Stims.st_times[ep][se]
                    if len(spikes[(start <= spikes) * (spikes <= start + t_after)])>0:
                        histo[c].extend(spikes[(start <= spikes) * (spikes <= start + t_after)]-start)
                        histo['Counts'][c] +=  len(spikes[(start <= spikes) * (spikes <= start + t_after)])
                else:
                    code = str(int(Stims.st_types[ep][se]))                                                
                    t = str(Stims.st_logic.types[code])
                
                    code = str(int(Stims.st_pad[ep][se]))
                    p = Stims.st_logic.pad[code]
                    
                    r = Stims.st_rep[ep][se]
                    i = Stims.st_isi[ep][se]
                    start = Stims.st_times[ep][se]
                              
                    t_after = 500*r
                    
                    if len(spikes[(start <= spikes) * (spikes <= start + t_after)])>0:
                        histo[c][t][p][r][i].extend(spikes[(start <= spikes) * (spikes <= start + t_after)]-start)
                        histo['Counts'][c][t][p][r][i]  += len((spikes[(start <= spikes) * (spikes <= start + t_after)]))
                                                       
        PSTH_times[codename] = histo
       
    return PSTH_times
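The spike-windowing idiom above relies on multiplying two boolean masks, which acts as an elementwise logical AND; a minimal standalone sketch with invented spike times:

import numpy as np

spikes = np.array([12.0, 480.0, 505.0, 990.0, 1003.0])  # spike times in ms
start, t_after = 500.0, 500.0

mask = (start <= spikes) * (spikes <= start + t_after)   # same as np.logical_and
print(spikes[mask] - start)  # [  5. 490.] -> spike times relative to stim onset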
Example #11
    def test_valid_name(self):
        """
        Test that valid_name works.
        """
        from attrdict import AttrDict

        self.assertTrue(AttrDict._valid_name('valid'))
        self.assertFalse(AttrDict._valid_name('_invalid'))
        self.assertFalse(AttrDict._valid_name('get'))
Example #12
    def test_build(self):
        """
        Test that build works.
        """
        from attrdict import AttrDict

        self.assertTrue(isinstance(AttrDict._build({}), AttrDict))
        self.assertTrue(isinstance(AttrDict._build([]), list))
        self.assertTrue(isinstance(AttrDict._build(AttrDict()), AttrDict))
        self.assertTrue(isinstance(AttrDict._build(1), int))
Example #13
    def test_delattr(self):
        """
        Test that key-value pairs can be deleted as attributes.
        """
        from attrdict import AttrDict

        adict = AttrDict({'foo': 'bar', '_set': 'shadows', 'get': 'shadows'})

        del adict.foo

        # with self.assertRaises(AttributeError):
        try:
            adict.foo
        except AttributeError:
            pass  # expected
        else:
            raise AssertionError("Exception not thrown")

        # with self.assertRaises(KeyError):
        try:
            adict['foo']
        except KeyError:
            pass  # expected
        else:
            raise AssertionError("Exception not thrown")

        # with self.assertRaises(TypeError):
        try:
            del adict.lorem
        except TypeError:
            pass  # expected
        else:
            raise AssertionError("Exception not thrown")

        # with self.assertRaises(TypeError):
        try:
            del adict._set
        except TypeError:
            pass  # expected
        else:
            raise AssertionError("Exception not thrown")

        # with self.assertRaises(TypeError):
        try:
            del adict.get
        except TypeError:
            pass  # expected
        else:
            raise AssertionError("Exception not thrown")

        # make sure things weren't deleted
        adict._set
        self.assertEqual(adict.get('get'), 'shadows')
        self.assertEqual(adict, {'_set': 'shadows', 'get': 'shadows'})
Example #14
 def __init__(self, id, parent):
   """ Inits a new widget.
   The constructor should never be called directly. To add a sub-widget use
   _add_widget. To create a root widget see report.py.
   """
   self.id = id
   self.parent = parent
   self.unique_counter = 0
   self.widgets = AttrDict()
   self.values = AttrDict()
   self.value_name_to_cache_key = {}
Example #15
    def __init__(self, env='prod'):

        self.env = os.environ.get('NETKI_ENV', env)

        config_file = ConfigManager.find_config_file(self.env)

        if not config_file or not os.path.isfile(config_file):
            raise Exception('Cannot Find Config File app.%s.config' % self.env)

        log.info('Loading Configuration [ENV: %s | FILE: %s]' % (self.env, config_file))

        with open(config_file,'r') as file:
            config = ConfigParser.ConfigParser()
            config.readfp(file)

            pre_transform_dict = AttrDict(config._sections)
            for k,v in pre_transform_dict.iteritems():
                if isinstance(v, dict):
                    is_changed = False
                    for key,value in v.items():

                        # Convert Bools
                        if value.strip().lower() == 'true':
                            v[key] = True
                            is_changed = True
                            continue

                        if value.strip().lower() == 'false':
                            v[key] = False
                            is_changed = True
                            continue

                        # Convert Floats
                        try:
                            if '.' in value:
                                v[key] = float(value)
                                is_changed = True
                                continue
                        except ValueError:
                            pass

                        # Convert Ints
                        try:
                            v[key] = int(value)
                            is_changed = True
                            continue
                        except ValueError:
                            pass

                    if is_changed:
                        pre_transform_dict.__setattr__(k,v)

            self.config_dict = pre_transform_dict
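The bool/float/int coercion cascade above can be isolated into a small helper; a sketch of the same logic (the coerce name is invented here), assuming plain ConfigParser-style string values:

def coerce(value):
    lowered = value.strip().lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    try:
        if '.' in value:
            return float(value)   # floats first, mirroring the '.' check above
        return int(value)
    except ValueError:
        return value              # leave non-numeric strings untouched

print([coerce(v) for v in ['True', '8080', '0.5', 'localhost']])
# [True, 8080, 0.5, 'localhost']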
Example #16
 def test_simple(self):
     d = AttrDict()
     d.abc = 1
     self.assertEqual(d.abc, 1)
     self.assertEqual(d['abc'], 1)
     self.assertEqual(len(d), 1)
     del(d.abc)
     self.assertRaises(AttributeError, lambda: d.abc)
     d.count = 0
     self.assertEqual(d.count, 0)
     del(d['count'])
     self.assertRaises(AttributeError, lambda: d.count)
     self.assertEqual(len(d), 0)
Example #17
    def __init__(self, response):
        self.raw = response
        self.address = response['address']
        self.backup_id = response['backup_id']
        self.server_id = response['server_id']
        self.turnkey_version = response['turnkey_version']
        self.skpp = self._key_has_passphrase(response['key'])

        self.created = self._parse_datetime(response['date_created'])
        self.updated = self._parse_datetime(response['date_updated'])

        self.size = int(response['size']) # in MBs
        self.label = response['description']

        AttrDict.__init__(self)
Example #18
def readkwikinfo(kwik, grupete=3):
    model = KwikModel(kwik) # load kwik model from file
    spiketimes = model.spike_times # extract the absolute spike times
    clusters = model.cluster_groups # extract the cluster names
    sample_rate = model.sample_rate # extract sampling freq
    
    spikedata = {} # initialise dictionary
    for cluster in clusters.keys():
        clustergroup = clusters[cluster]
        if clustergroup == grupete:  # only look at the specified cluster type: 0 = noise, 1 = MUA, 2 = GOOD, 3 = unsorted
            spiketimematrix = AttrDict({'spike_times': np.zeros(len(spiketimes[np.where(model.spike_clusters == cluster)]))})
            spiketimematrix.spike_times = spiketimes[np.where(model.spike_clusters == cluster)]
            spikedata[cluster] = spiketimematrix  # the result is a dictionary of attribute-accessible spike times
            # attribute-accessible means spikedata[cluster].spike_times works; a plain dict would need spikedata[cluster]['spike_times']
    
    model.close()
    
    return spikedata, sample_rate
Example #19
def BuildSig(Surprise,thresh,blankw,nconsecabove,surpmin,nsizesaboveth):

    # here computing significance
    Detection = AttrDict({})

    Sig, SigSizesList, SigTop, PW, PWstrong, SigStrength, SigStrengthNorm = IsSig(Surprise, thresh, blankw, nconsecabove, surpmin, nsizesaboveth)

    Detection.update({'Sig': Sig, 'Sig_sizes': SigSizesList, 'Sig_top': SigTop, 'PW': PW, 'PWstrong': PWstrong})
    Detection.update({'Sig_strength': SigStrength, 'Sig_strength_norm': SigStrengthNorm})
    
    Detection.update({'logic_tree_significants': '[whiskers][direction] = 25x2'})
    Detection.update({'logic_tree_sig_sizes': '[whiskers][binsizes][direction] = 25x20x2'})
    #-------------------------------------
    
    return Detection
Example #20
    def test_has_key(self):
        """
        Test has_key behavior in regard to this python
        """
        import inspect
        from attrdict import AttrDict

        adict = AttrDict({'foo': 'bar'})
        masked = AttrDict({'has_key': 'foobar'})

        if PY2:
            self.assertTrue(inspect.ismethod(adict.has_key))
            self.assertTrue(inspect.ismethod(masked.has_key))
            self.assertFalse(adict.has_key('has_key'))
            self.assertTrue(masked.has_key('has_key'))
        else:  # Python3 dropped this method
            self.assertFalse(inspect.ismethod(masked.has_key))
            self.assertRaises(AttributeError, getattr, adict, 'has_key')
            self.assertEqual(masked.has_key, 'foobar')
Example #21
def BuildPSTH(stim,stimtype, Spikes, sampling_freq, t_before, t_after,starts,stops,exp,meas) :
## The first task is to find the stimulus onset times for each whisker in each sweep in each direction
    #stim, stimtype = read_stimulus()
    stim = stim[np.where(stimtype=='F')[0], :, :]
    starts = starts[np.where(stimtype=='F')[0]]
    stops = stops[np.where(stimtype=='F')[0]]
    
    stimtimes = {}
    for w in np.arange(25, dtype='int') :  
        timesUP = []
        timesDOWN = []
        for i in np.arange(len(stim), dtype='int') :
            indsUP = (np.where(stim[i, w, :]==1108.8889)[0]-1)[::2]
            # This finds all time points where the stim = 1108.8889, because each ramp has two 1108.8889 values
            # (on the way up and on the way down) we take every other index using [::2]
            timesUP.append(indsUP)
            indsDOWN = (np.where(stim[i, w, :]==-1108.8889)[0]-1)[::2]
            # This finds all time points where the stim = -1108.8889, because each ramp has two -1108.8889 values
            # (on the way up and on the way down) we take every other index using [::2]
            timesDOWN.append(indsDOWN)
        stimtimes[w] = timesUP, timesDOWN # stimtimes[whisker][0][:]=UP stimtimes[whisker][1][:]=DOWN
    
    # make an 'output dict'
    # the PSTH will be built on -tbefore:tafter
    hist_inds = {}
    PSTH = {}
    psth = dict()
    psth_times = dict()
    
    # Loop each neuron and get the spikes.
    for neuron in list(Spikes.keys()): 
        codename = 'exp'+ str(exp) + '_' + str(meas) + '_c' + str(neuron)
        
        psth = AttrDict({'clusnum': neuron,'exp' : int(exp) , 'meas': int(meas[1]) , 'shank': int(meas[3])})
        
        psth.update(AttrDict({'psth_counts': [] , 'psth_times': [] , 'psth_length': [t_before,t_after] }))
        
        psth['psth_counts'], psth['psth_times'] = PSTH_spikes(stim, stimtype, stimtimes, Spikes[neuron].spike_times, sampling_freq, t_before, t_after, starts, stops)
        
        PSTH[codename] = psth
       
    return PSTH
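The onset-detection trick above (find every sample equal to the ramp value, then keep every other index with [::2] because each ramp crosses that value twice) as a standalone sketch with an invented trace:

import numpy as np

trace = np.array([0, 1108.8889, 200, 1108.8889, 0, 1108.8889, 300, 1108.8889, 0])
inds = np.where(trace == 1108.8889)[0]  # [1 3 5 7]: two hits per ramp
onsets = (inds - 1)[::2]                # [0 4]: the sample just before each rising crossing
print(onsets)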
Example #22
    def test_get(self):
        """
        Test that attributes can be accessed (both as keys, and as
        attributes).
        """
        from attrdict import AttrDict

        adict = AttrDict({'foo': 'bar'})

        # found
        self.assertEqual(adict.get('foo'), 'bar')

        # found, default given
        self.assertEqual(adict.get('foo', 'baz'), 'bar')

        # not found
        self.assertEqual(adict.get('bar'), None)

        # not found, default given
        self.assertEqual(adict.get('bar', 'baz'), 'baz')
Example #23
    def test_default_dict(self):
        """
        test attrdict's defaultdict support.
        """
        from attrdict import AttrDict

        self.assertRaises(KeyError, lambda: AttrDict()['foo'])
        self.assertRaises(AttributeError, lambda: AttrDict().foo)

        adict = AttrDict(default_factory=lambda: ('foo', 'bar', 'baz'))

        self.assertEqual(adict['foo'], ('foo', 'bar', 'baz'))
        self.assertEqual(adict('bar'), ('foo', 'bar', 'baz'))
        self.assertEqual(adict.baz, ('foo', 'bar', 'baz'))
        self.assertEqual(adict.get('lorem'), None)
        self.assertEqual(adict.get('ipsum', 'alpha'), 'alpha')

        # make sure this doesn't break access
        adict.bravo = 'charlie'

        self.assertEqual(adict['bravo'], 'charlie')
        self.assertEqual(adict('bravo'), 'charlie')
        self.assertEqual(adict.bravo, 'charlie')
        self.assertEqual(adict.get('bravo'), 'charlie')
        self.assertEqual(adict.get('bravo', 'alpha'), 'charlie')
Example #24
def start_mock_server80():
    """Start a mock Tomcat Manager application

    :returns: a tuple: (url, user, password) where the server is accessible
    """
    # pylint: disable=unused-variable
    # go find an unused port
    sock = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
    sock.bind(('localhost', 0))
    address, port = sock.getsockname()
    sock.close()

    tms = AttrDict()
    tms.url = 'http://localhost:{}/manager'.format(port)
    tms.user = USER
    tms.password = PASSWORD
    tms.warfile = '/path/to/server.war'
    tms.contextfile = 'path/to/context.xml'

    mock_server = HTTPServer(('localhost', port), MockRequestHandler80)
    mock_server_thread = Thread(target=mock_server.serve_forever)
    mock_server_thread.daemon = True
    mock_server_thread.start()

    return tms
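The unused-port trick at the top of start_mock_server80 (bind to port 0 and let the OS assign a free ephemeral port) as a minimal standalone helper; note the small race window between close() and the real server re-binding the port:

import socket

def find_free_port():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('localhost', 0))
    _, port = sock.getsockname()  # the OS filled in a free port
    sock.close()
    return port

print(find_free_port())  # e.g. 54321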
Example #25
    def test_iteration_3(self):
        """
        Test the iteration methods (items, keys, values[, iteritems,
        iterkeys, itervalues]).
        """
        if PY2:  # Python2.6 doesn't have skipif/skipunless
            return

        from attrdict import AttrDict

        empty = AttrDict()
        adict = AttrDict({'foo': 'bar', 'lorem': 'ipsum', 'alpha': {
            'beta': 1, 'bravo': empty}})

        iterator = empty.items()
        self.assertFalse(isinstance(iterator, list))
        self.assertEqual(list(iterator), [])

        iterator = empty.keys()
        self.assertFalse(isinstance(iterator, list))
        self.assertEqual(list(iterator), [])

        iterator = empty.values()
        self.assertFalse(isinstance(iterator, list))
        self.assertEqual(list(iterator), [])

        iterator = adict.items()
        self.assertFalse(isinstance(iterator, list))
        items = list(iterator)
        self.assertEqual(len(items), 3)
        self.assertTrue(('foo', 'bar') in items)
        self.assertTrue(('lorem', 'ipsum') in items)
        self.assertTrue(('alpha', {'beta': 1, 'bravo': empty}) in items)

        iterator = adict.keys()
        self.assertFalse(isinstance(iterator, list))
        self.assertEqual(set(iterator), set(['foo', 'lorem', 'alpha']))

        iterator = adict.values()
        self.assertFalse(isinstance(iterator, list))
        values = list(iterator)
        self.assertEqual(len(values), 3)
        self.assertTrue('bar' in values)
        self.assertTrue('ipsum' in values)
        self.assertTrue({'beta': 1, 'bravo': empty} in values)

        # make sure 'iter' methods don't exist
        self.assertFalse(hasattr(adict, 'iteritems'))
        self.assertFalse(hasattr(adict, 'iterkeys'))
        self.assertFalse(hasattr(adict, 'itervalues'))
Example #26
def longest_palindromic(text):
	from attrdict import AttrDict
	result = AttrDict(half_length=0, index=0)
	text = '#' + '#'.join(text) + '#'
	for index, _ in enumerate(text):
		half_length = 0
		for distance in range(1, min(index, len(text) - index) + 1):
			print(distance, index)
			if index + distance < len(text):
				# if not equal, quit the inner loop
				if text[index + distance] == text[index - distance]:
					half_length += 1
					# print(half_length, index)
					if half_length > result.half_length:
						result.half_length, result.index = half_length, index
				else:
					break

	start = result.index - result.half_length
	end = result.index + result.half_length + 1
	print(result, start, end)
	return text[start:end].replace('#', '')
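A quick usage check of longest_palindromic (keep in mind the function prints its progress as written; the return value has the '#' padding stripped):

print(longest_palindromic('babad'))  # 'bab' ('aba' is an equally long answer)
print(longest_palindromic('abba'))   # 'abba' -- even-length palindromes work thanks to the '#' padding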
Example #27
def parse_link(link: bs4.element.Tag, domain: str) -> AttrDict:
    out = AttrDict()
    out.title = link.select('a:nth-of-type(1)')[0].text
    out.views = link.select('.views')[0].text.replace('Zugriffe', '').strip()
    out.answers = link.select('.posts')[0].text.replace('Antworten', '').strip()
    out.date = link.select('a:nth-of-type(3)')[0].text
    out.url = domain + link.select('a:nth-of-type(1)')[0].attrs['href'].replace('./', '/')
    return out
Example #28
 def __init__(self, name='Untitled', state=None, domains=None, subprocess=None,
              lat=None, lev=None, num_lat=None, num_levels=None,
              input=None, verbose=True, **kwargs):
     # verbose flag used to control text output at process creation time
     self.verbose = verbose
     self.name = name
     # dictionary of domains. Keys are the domain names
     self.domains = _make_dict(domains, _Domain)
     #  If lat is given, create a simple domain
     if lat is not None:
         sfc = zonal_mean_surface()
         self.domains.update({'default': sfc})
     # dictionary of state variables (all of type Field)
     self.state = AttrDict()
     states = _make_dict(state, Field)
     for name, value in states.items():
         self.set_state(name, value)
     # dictionary of model parameters
     self.param = kwargs
     # dictionary of diagnostic quantities
     #self.diagnostics = AttrDict()
     #self._diag_vars = frozenset()
     self._diag_vars = []
     # dictionary of input quantities
     #self.input = _make_dict(input, Field)
     if input is None:
         #self._input_vars = frozenset()
         self._input_vars = []
     else:
         self.add_input(list(input.keys()))
         for name, var in input.items():
             self.__dict__[name] = var
     self.creation_date = time.strftime("%a, %d %b %Y %H:%M:%S %z",
                                        time.localtime())
     # subprocess is a dictionary of any sub-processes
     self.subprocess = AttrDict()
     if subprocess is not None:
         self.add_subprocesses(subprocess)
Example #29
File: conf.py  Project: deti/efa
def read():
    import os
    import metayaml
    from attrdict import AttrDict

    def root_directory(application_path=None):
        root_path = application_path or os.path.dirname(__file__)
        while root_path and "bin" not in os.listdir(root_path):
            root_path = os.path.dirname(root_path)
        return root_path

    def config_directory():
        root_path = root_directory()
        return os.path.join(root_path, "configs")

    def fix_me():
        print("fix me")
        raise Exception("Required field is empty")

    stage_config = os.environ.get("EFA_CONFIG", os.path.join(config_directory(), "dev.yaml"))

    configs = [os.path.join(root_directory(), "scripts", "configs", "efa.yaml"),
               stage_config]
    config = metayaml.read(configs,
                           defaults={
                               "__FIX_ME__": fix_me,
                               "STAGE_DIRECTORY": config_directory(),
                               "join": os.path.join,
                               "ROOT": root_directory()
                           })
    config = AttrDict(config, recursive=True)
    for k in config.keys():
        if k == "__FIX_ME__":
            continue
        v = getattr(config, k)
        globals()[k] = v

    return config
Example #30
 def __init__(self):
     # The default configuration file for KloudBuster
     default_cfg = resource_string(__name__, "cfg.scale.yaml")
     # Read the configuration file
     self.config_scale = AttrDict(yaml.safe_load(default_cfg))
     self.alt_cfg = None
     self.cred_tested = None
     self.cred_testing = None
     self.server_cfg = None
     self.client_cfg = None
     self.topo_cfg = None
     self.tenants_list = None
     self.storage_mode = False
     self.multicast_mode = False
Example #31
def read_yaml(filepath):
    with open(filepath) as f:
        config = yaml.safe_load(f)
    return AttrDict(config)
Example #32
def main():
    results_dict = {
        'data_precondition': list(),
        'dataset': list(),
        'method': list(),
        'runtime': list(),
        'num_samples': list(),
        'num_agents': list()
    }
    data_precondition = 'curr'
    for dataset_name in ['eth', 'hotel', 'univ', 'zara1', 'zara2']:
        print('At %s dataset' % dataset_name)

        ### SGAN LOADING ###
        sgan_model_path = os.path.join(
            args.sgan_models_path, '_'.join([dataset_name, '12', 'model.pt']))

        checkpoint = torch.load(sgan_model_path, map_location='cpu')
        generator = eval_utils.get_generator(checkpoint)
        _args = AttrDict(checkpoint['args'])
        path = get_dset_path(_args.dataset_name, args.sgan_dset_type)
        print('Evaluating', sgan_model_path, 'on', _args.dataset_name,
              args.sgan_dset_type)

        _, sgan_data_loader = data_loader(_args, path)

        ### OUR METHOD LOADING ###
        data_dir = '../sgan-dataset/data'
        eval_data_dict_name = '%s_test.pkl' % dataset_name
        log_dir = '../sgan-dataset/logs/%s' % dataset_name

        trained_model_dir = os.path.join(
            log_dir, eval_utils.get_our_model_dir(dataset_name))
        eval_data_path = os.path.join(data_dir, eval_data_dict_name)
        with open(eval_data_path, 'rb') as f:
            eval_data_dict = pickle.load(f, encoding='latin1')
        eval_dt = eval_data_dict['dt']
        print('Loaded evaluation data from %s, eval_dt = %.2f' %
              (eval_data_path, eval_dt))

        # Loading weights from the trained model.
        specific_hyperparams = eval_utils.get_model_hyperparams(
            args, dataset_name)
        model_registrar = ModelRegistrar(trained_model_dir, args.device)
        model_registrar.load_models(specific_hyperparams['best_iter'])

        for key in eval_data_dict['input_dict'].keys():
            if isinstance(key, STGNode):
                random_node = key
                break

        hyperparams['state_dim'] = eval_data_dict['input_dict'][
            random_node].shape[2]
        hyperparams['pred_dim'] = len(eval_data_dict['pred_indices'])
        hyperparams['pred_indices'] = eval_data_dict['pred_indices']
        hyperparams['dynamic_edges'] = args.dynamic_edges
        hyperparams['edge_state_combine_method'] = specific_hyperparams[
            'edge_state_combine_method']
        hyperparams['edge_influence_combine_method'] = specific_hyperparams[
            'edge_influence_combine_method']
        hyperparams['nodes_standardization'] = eval_data_dict[
            'nodes_standardization']
        hyperparams['labels_standardization'] = eval_data_dict[
            'labels_standardization']
        hyperparams['edge_radius'] = args.edge_radius

        eval_hyperparams = copy.deepcopy(hyperparams)
        eval_hyperparams['nodes_standardization'] = eval_data_dict[
            "nodes_standardization"]
        eval_hyperparams['labels_standardization'] = eval_data_dict[
            "labels_standardization"]

        kwargs_dict = {
            'dynamic_edges':
            hyperparams['dynamic_edges'],
            'edge_state_combine_method':
            hyperparams['edge_state_combine_method'],
            'edge_influence_combine_method':
            hyperparams['edge_influence_combine_method'],
            'edge_addition_filter':
            args.edge_addition_filter,
            'edge_removal_filter':
            args.edge_removal_filter
        }

        print('-------------------------')
        print('| EVALUATION PARAMETERS |')
        print('-------------------------')
        print('| checking: %s' % data_precondition)
        print('| device: %s' % args.device)
        print('| eval_device: %s' % args.eval_device)
        print('| edge_radius: %s' % hyperparams['edge_radius'])
        print('| EE state_combine_method: %s' %
              hyperparams['edge_state_combine_method'])
        print('| EIE scheme: %s' %
              hyperparams['edge_influence_combine_method'])
        print('| dynamic_edges: %s' % hyperparams['dynamic_edges'])
        print('| edge_addition_filter: %s' % args.edge_addition_filter)
        print('| edge_removal_filter: %s' % args.edge_removal_filter)
        print('| MHL: %s' % hyperparams['minimum_history_length'])
        print('| PH: %s' % hyperparams['prediction_horizon'])
        print('| # Samples: %s' % args.num_samples)
        print('| # Runs: %s' % args.num_runs)
        print('-------------------------')

        eval_stg = OnlineSpatioTemporalGraphCVAEModel(None, model_registrar,
                                                      eval_hyperparams,
                                                      kwargs_dict,
                                                      args.eval_device)
        print('Created evaluation STG model.')

        print('About to begin evaluation computation for %s.' % dataset_name)
        with torch.no_grad():
            eval_inputs, _ = eval_utils.sample_inputs_and_labels(
                eval_data_dict, device=args.eval_device)

        (obs_traj, pred_traj_gt, obs_traj_rel, seq_start_end, data_ids,
         t_predicts) = eval_utils.get_sgan_data_format(
             eval_inputs, what_to_check=data_precondition)

        num_runs = args.num_runs
        print('num_runs, seq_start_end.shape[0]', args.num_runs,
              seq_start_end.shape[0])
        if args.num_runs > seq_start_end.shape[0]:
            print(
                'num_runs (%d) > seq_start_end.shape[0] (%d), reducing num_runs to match.'
                % (num_runs, seq_start_end.shape[0]))
            num_runs = seq_start_end.shape[0]

        random_scene_idxs = np.random.choice(seq_start_end.shape[0],
                                             size=(num_runs, ),
                                             replace=False).astype(int)

        for scene_idxs in random_scene_idxs:
            choice_list = seq_start_end[scene_idxs]

            overall_tic = time.time()
            for sample_num in range(args.num_samples):
                pred_traj_fake_rel = generator(obs_traj, obs_traj_rel,
                                               seq_start_end)
                pred_traj_fake = relative_to_abs(pred_traj_fake_rel,
                                                 obs_traj[-1])

            overall_toc = time.time()
            print('SGAN overall', overall_toc - overall_tic)
            results_dict['data_precondition'].append(data_precondition)
            results_dict['dataset'].append(dataset_name)
            results_dict['method'].append('sgan')
            results_dict['runtime'].append(overall_toc - overall_tic)
            results_dict['num_samples'].append(args.num_samples)
            results_dict['num_agents'].append(
                int(choice_list[1].item() - choice_list[0].item()))

        print('Done running SGAN')

        for node in eval_data_dict['nodes_standardization']:
            for key in eval_data_dict['nodes_standardization'][node]:
                eval_data_dict['nodes_standardization'][node][
                    key] = torch.from_numpy(
                        eval_data_dict['nodes_standardization'][node]
                        [key]).float().to(args.device)

        for node in eval_data_dict['labels_standardization']:
            for key in eval_data_dict['labels_standardization'][node]:
                eval_data_dict['labels_standardization'][node][
                    key] = torch.from_numpy(
                        eval_data_dict['labels_standardization'][node]
                        [key]).float().to(args.device)

        for run in range(num_runs):
            random_scene_idx = random_scene_idxs[run]
            data_id = data_ids[random_scene_idx]
            t_predict = t_predicts[random_scene_idx] - 1

            init_scene_dict = dict()
            for first_timestep in range(t_predict + 1):
                for node, traj_data in eval_data_dict['input_dict'].items():
                    if isinstance(node, STGNode):
                        init_pos = traj_data[data_id, first_timestep, :2]
                        if np.any(init_pos):
                            init_scene_dict[node] = init_pos

                if len(init_scene_dict) > 0:
                    break

            init_scene_graph = SceneGraph()
            init_scene_graph.create_from_scene_dict(init_scene_dict,
                                                    args.edge_radius)

            curr_inputs = {
                k: v[data_id, first_timestep:t_predict + 1]
                for k, v in eval_data_dict['input_dict'].items()
                if (isinstance(k, STGNode) and (
                    k in init_scene_graph.active_nodes))
            }
            curr_pos_inputs = {k: v[..., :2] for k, v in curr_inputs.items()}

            with torch.no_grad():
                overall_tic = time.time()
                preds_dict_most_likely = eval_stg.forward(
                    init_scene_graph,
                    curr_pos_inputs,
                    curr_inputs,
                    None,
                    hyperparams['prediction_horizon'],
                    args.num_samples,
                    most_likely=True)
                overall_toc = time.time()
                print('Our MLz overall', overall_toc - overall_tic)
                results_dict['data_precondition'].append(data_precondition)
                results_dict['dataset'].append(dataset_name)
                results_dict['method'].append('our_most_likely')
                results_dict['runtime'].append(overall_toc - overall_tic)
                results_dict['num_samples'].append(args.num_samples)
                results_dict['num_agents'].append(len(init_scene_dict))

                overall_tic = time.time()
                preds_dict_full = eval_stg.forward(
                    init_scene_graph,
                    curr_pos_inputs,
                    curr_inputs,
                    None,
                    hyperparams['prediction_horizon'],
                    args.num_samples,
                    most_likely=False)
                overall_toc = time.time()
                print('Our Full overall', overall_toc - overall_tic)
                results_dict['data_precondition'].append(data_precondition)
                results_dict['dataset'].append(dataset_name)
                results_dict['method'].append('our_full')
                results_dict['runtime'].append(overall_toc - overall_tic)
                results_dict['num_samples'].append(args.num_samples)
                results_dict['num_agents'].append(len(init_scene_dict))

        pd.DataFrame.from_dict(results_dict).to_csv(
            '../sgan-dataset/plots/data/%s_%s_runtimes.csv' %
            (data_precondition, dataset_name),
            index=False)
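The bookkeeping pattern above (parallel lists of equal length collected in a dict, then dumped via pandas) in a minimal sketch with invented values:

import pandas as pd

results_dict = {'dataset': ['eth', 'eth'],
                'method': ['sgan', 'our_full'],
                'runtime': [0.41, 0.08]}
pd.DataFrame.from_dict(results_dict).to_csv('runtimes.csv', index=False)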
Example #33
    def _create_fabrics_two_pr(self, name, two_fabric=False):
        self.features = {}
        self.role_definitions = {}
        self.feature_configs = []

        self.job_templates = []
        self.fabrics = []
        self.bgp_routers = []
        self.node_profiles = []
        self.role_configs = []
        self.physical_routers = []
        self.create_features(
            ['overlay-bgp', 'l2-gateway', 'l3-gateway', 'vn-interconnect'])
        self.create_physical_roles(['spine'])
        self.create_overlay_roles(['crb-gateway'])

        self.create_role_definitions([
            AttrDict({
                'name': 'crb-gateway@spine',
                'physical_role': 'spine',
                'overlay_role': 'crb-gateway',
                'features': ['overlay-bgp', 'l2-gateway', 'l3-gateway', 'vn-interconnect'],
                'feature_configs': {}
            })
        ])

        jt = self.create_job_template('job-template-' + name + self.id())
        self.job_templates.append(jt)

        fabric = self.create_fabric('fab-' + name + self.id())
        self.fabrics.append(fabric)

        fabric2 = fabric
        if two_fabric:
            fabric2 = self.create_fabric('fab-' + name + '2' + self.id())
            self.fabrics.append(fabric2)

        role = 'spine'
        spine_rb_role = 'crb-gateway'
        np, rc = self.create_node_profile('node-profile-' + name + self.id(),
                                          device_family='junos-qfx',
                                          role_mappings=[
                                              AttrDict({
                                                  'physical_role':
                                                  role,
                                                  'rb_roles': [spine_rb_role]
                                              })
                                          ],
                                          job_template=jt)
        self.node_profiles.append(np)
        self.role_configs.append(rc)

        br1, pr1 = self.create_router(
            'device-1' + self.id(),
            '7.7.7.7',
            product='qfx10002',
            family='junos-qfx',
            role='spine',
            rb_roles=[spine_rb_role],
            physical_role=self.physical_roles[role],
            overlay_role=self.overlay_roles[spine_rb_role],
            fabric=fabric,
            node_profile=np)
        pr1.set_physical_router_loopback_ip('30.30.0.22')
        self._vnc_lib.physical_router_update(pr1)

        self.physical_routers.append(pr1)
        self.bgp_routers.append(br1)

        br2, pr2 = self.create_router(
            'device-2' + self.id(),
            '7.7.7.8',
            product='qfx10002',
            family='junos-qfx',
            role=role,
            rb_roles=[spine_rb_role],
            physical_role=self.physical_roles[role],
            overlay_role=self.overlay_roles[spine_rb_role],
            fabric=fabric2,
            node_profile=np)
        pr2.set_physical_router_loopback_ip('30.30.0.23')
        self._vnc_lib.physical_router_update(pr2)

        self.physical_routers.append(pr2)
        self.bgp_routers.append(br2)

        return fabric, fabric2, pr1, pr2
Example #34
 def __init__(self, api, response):
     self.api = api
     self._parse_response(response)
     AttrDict.__init__(self)
Example #35
def execGetMovieTitleApi():
    response = getPopularApiPerPage()
    result = makeMovieTitlesArray(AttrDict(response).results)
    return result
Example #36
def load_futures_config(name):
    # use a context manager so the file handle is closed promptly
    with open('.\\futures\\' + name + '\\config.yaml', 'rb') as f:
        return AttrDict(load(f))
Example #37
def read_yaml(fallback_file=NEPTUNE_CONFIG_PATH):
    with open(fallback_file) as f:
        config = yaml.safe_load(f)
    return AttrDict(config)
Example #38
    def create_all(self):
        self.create_features([
            'underlay-ip-clos', 'overlay-bgp', 'l2-gateway', 'l3-gateway',
            'vn-interconnect'
        ])
        self.create_physical_roles(['leaf', 'spine', 'pnf'])
        self.create_overlay_roles(['crb-gateway', 'pnf-servicechain'])
        self.create_role_definitions([
            AttrDict({
                'name': 'crb-gateway-spine',
                'physical_role': 'spine',
                'overlay_role': 'crb-gateway',
                'features': ['l3-gateway', 'vn-interconnect'],
                'feature_configs': None
            }),
            AttrDict({
                'name': 'pnf-service-chain',
                'physical_role': 'pnf',
                'overlay_role': 'pnf-servicechain',
                'features': ['l3-gateway', 'vn-interconnect'],
                'feature_configs': None
            })
        ])

        self.jt = self.create_job_template('job-template-1' + self.id())
        self.fabric = self.create_fabric('fabric-1' + self.id())
        self.np1, self.rc1 = self.create_node_profile(
            'node-profile-1' + self.id(),
            device_family='junos-qfx',
            role_mappings=[
                AttrDict({
                    'physical_role': 'spine',
                    'rb_roles': ['crb-gateway']
                })
            ],
            job_template=self.jt)
        self.np2, self.rc2 = self.create_node_profile(
            'node-profile-2' + self.id(),
            device_family='junos-srx',
            role_mappings=[
                AttrDict({
                    'physical_role': 'pnf',
                    'rb_roles': ['pnf-servicechain']
                })
            ],
            job_template=self.jt)

        rtr1_name = 'router1' + self.id()
        self.bgp_router1, self.pr1 = self.create_router(
            rtr1_name,
            '1.1.1.1',
            product='qfx10008',
            family='junos-qfx',
            role='spine',
            rb_roles=['crb-gateway', 'DCI-Gateway'],
            physical_role=self.physical_roles['spine'],
            overlay_role=self.overlay_roles['crb-gateway'],
            fabric=self.fabric,
            node_profile=self.np1)
        rtr2_name = 'router2' + self.id()
        self.bgp_router2, self.pr2 = self.create_router(
            rtr2_name,
            '1.1.1.2',
            product='qfx10008',
            family='junos-qfx',
            role='pnf',
            rb_roles=['PNF-Servicechain'],
            physical_role=self.physical_roles['pnf'],
            overlay_role=self.overlay_roles['pnf-servicechain'],
            fabric=self.fabric,
            node_profile=self.np2)

        pi1_0_name = "xe-0/0/0"
        self.pi1_0 = PhysicalInterface(pi1_0_name, parent_obj=self.pr1)
        self.pi1_0_fq = ':'.join(self.pi1_0.fq_name)
        self._vnc_lib.physical_interface_create(self.pi1_0)

        pi1_1_name = "xe-0/0/1"
        self.pi1_1 = PhysicalInterface(pi1_1_name, parent_obj=self.pr1)
        self.pi1_1_fq = ':'.join(self.pi1_1.fq_name)
        self._vnc_lib.physical_interface_create(self.pi1_1)

        pi2_0_name = "xe-0/0/0"
        self.pi2_0 = PhysicalInterface(pi2_0_name, parent_obj=self.pr2)
        self._vnc_lib.physical_interface_create(self.pi2_0)

        pi2_1_name = "xe-0/0/1"
        self.pi2_1 = PhysicalInterface(pi2_1_name, parent_obj=self.pr2)
        self._vnc_lib.physical_interface_create(self.pi2_1)

        self.vn1 = self.create_vn('1', '1.1.1.0')

        lr1_name = 'lr1-' + self.id()
        lr1_fq_name = ['default-domain', 'default-project', lr1_name]
        self.lr1 = LogicalRouter(fq_name=lr1_fq_name,
                                 parent_type='project',
                                 logical_router_type='vxlan-routing',
                                 vxlan_network_identifier='3000')
        self.lr1.set_physical_router(self.pr1)
        self._vnc_lib.logical_router_create(self.lr1)

        lr2_name = 'lr2-' + self.id()
        lr2_fq_name = ['default-domain', 'default-project', lr2_name]
        self.lr2 = LogicalRouter(fq_name=lr2_fq_name,
                                 parent_type='project',
                                 logical_router_type='vxlan-routing',
                                 vxlan_network_identifier='4000')
        self.lr2.set_physical_router(self.pr2)
        self._vnc_lib.logical_router_create(self.lr2)

        fq_name = ['default-domain', 'default-project', 'vmi-' + self.id()]
        self.vmi = VirtualMachineInterface(fq_name=fq_name,
                                           parent_type='project')
        self.vmi.set_virtual_network(self.vn1)
        self._vnc_lib.virtual_machine_interface_create(self.vmi)

        self.lr1.add_virtual_machine_interface(self.vmi)
        self._vnc_lib.logical_router_update(self.lr1)
Example #39
                         np.exp([min(total_avg_cost, 100)]),
                         train_avg_batch_cost, batch_cost_avg.get_average(),
                         reader_cost_avg.get_average(),
                         batch_ips_avg.get_total_cnt(),
                         batch_ips_avg.get_average_per_sec()))
                reader_cost_avg.reset()
                batch_cost_avg.reset()
                batch_ips_avg.reset()

            if step_idx % args.save_step == 0 and step_idx != 0:
                if args.save_model:
                    model_path = os.path.join(
                        args.save_model, "step_" + str(step_idx), "transformer")
                    paddle.static.save(train_program, model_path)

            batch_id += 1
            step_idx += 1
            batch_start = time.time()

    paddle.disable_static()


if __name__ == "__main__":
    ARGS = parse_args()
    yaml_file = ARGS.config
    with open(yaml_file, 'rt') as f:
        args = AttrDict(yaml.safe_load(f))
        pprint(args)

    do_train(args)
Example #40
async def test_call(response_mock):
    token = 'asdf1234'

    response_mock.post(
        'https://slack.com/api/test11',
        body=ujson.dumps({
            'res': 'hello world!',
        }),
        headers={'content-type': 'application/json'},
        status=200,
    )
    response_mock.post(
        'https://slack.com/api/test12',
        body=ujson.dumps({
            'res': 'hello world!',
            'data': {
                'extra': 'wow',
            },
        }),
        headers={'content-type': 'application/json'},
        status=200,
    )

    response_mock.post(
        'https://slack.com/api/test21',
        body=ujson.dumps({
            'error': 'aaa',
        }),
        headers={'content-type': 'application/json'},
        status=404,
    )
    response_mock.post(
        'https://slack.com/api/test22',
        body=ujson.dumps({
            'error': 'aaa',
        }),
        headers={'content-type': 'application/json'},
        status=404,
    )
    response_mock.post(
        'https://slack.com/api/test3',
        body=ujson.dumps({
            'res': 'hello world!',
        }),
        headers={'content-type': 'application/json'},
        status=200,
    )

    config = AttrDict(copy.deepcopy(DEFAULT))
    config.DATABASE_URL = 'sqlite:///'
    config.TOKEN = 'asdf1234'
    config['LOGGING']['loggers']['yui']['handlers'] = ['console']
    del config['LOGGING']['handlers']['file']
    config.REGISTER_CRONTAB = False
    bot = Bot(config)

    res = await bot.call('test11')
    assert res['res'] == 'hello world!'

    res = await bot.call('test12', data={'extra': 'wow'})
    assert res['res'] == 'hello world!'
    assert res['data']['extra'] == 'wow'

    with pytest.raises(APICallError) as e:
        await bot.call('test21')
    assert str(e.value) == 'fail to call test21 with None'
    assert e.value.status_code == 404
    assert e.value.result == {'error': 'aaa'}
    assert e.value.headers['Content-Type'] == 'application/json'

    with pytest.raises(APICallError) as e:
        await bot.call('test22', data={'extra': 'wow'})
    assert str(e.value) == "fail to call test22 with {'extra': 'wow'}"
    assert e.value.status_code == 404
    assert e.value.result == {'error': 'aaa'}
    assert e.value.headers['Content-Type'] == 'application/json'

    res = await bot.call('test3', token=token)
    assert res['res'] == 'hello world!'
Example #41
    # Convert dygraph model to static graph model 
    transformer = paddle.jit.to_static(
        transformer,
        input_spec=[
            # src_word
            paddle.static.InputSpec(
                shape=[None, None], dtype="int64")
        ])

    # Save converted static graph model
    paddle.jit.save(transformer,
                    os.path.join(args.inference_model_dir, "transformer"))
    logger.info("Transformer has been saved to {}".format(
        args.inference_model_dir))


if __name__ == "__main__":
    ARGS = parse_args()
    yaml_file = ARGS.config
    with open(yaml_file, 'rt') as f:
        args = AttrDict(yaml.safe_load(f))
    args.benchmark = ARGS.benchmark
    args.vocab_file = ARGS.vocab_file
    args.unk_token = ARGS.unk_token
    args.bos_token = ARGS.bos_token
    args.eos_token = ARGS.eos_token
    pprint(args)

    do_export(args)
Example #42
    def __init__(self,
                 redactions=[],
                 namespace=CONFIG_NAMESPACE,
                 session=None,
                 tapis_optional=TAPIS_OPTIONAL,
                 **kwargs):

        # Timestamp
        self.created = microseconds()

        # Tapis client
        self.client = abaco.load_client(permissive=tapis_optional)
        # Load context, from which we can load client and other bits
        self.context = abaco.load_context(enable_mocks=self.MOCK_ENABLED)
        # Message
        self.message = self.context.get('message_dict')
        # TODO - actually implement this
        self.binary = None

        # Set nickname and session
        #
        # A session in the Reactors SDK is a linked set of executions
        # that inherit an identifier from their parent. If a reactor doesn't
        # detect a session on init, it creates one from its nickname. Sessions
        # are useful for tracing chains or cycles of executions
        #
        # The SDK honors two variables: x_session and SESSION. It is also
        # possible to explicitly set 'session' when initializing a Reactor object
        #
        if session is not None and isinstance(session, str):
            self.session = session
            self.nickname = self.session
        else:
            self.nickname = petname.Generate(self.NICKNAME_WORDS,
                                             self.NICKNAME_SEP)
            self.session = sessions.get_session(self.context, self.nickname)

        # Basic properties(property name, context var)
        for pn, cv in [('uid', 'actor_id'), ('execid', 'execution_id'),
                       ('workerid', 'worker_id'), ('state', 'state'),
                       ('username', 'username'),
                       ('container_repo', 'actor_repo'),
                       ('actor_name', 'actor_name')]:
            setattr(self, pn, self.context.get(cv))

        # The 'local' property can be used by Reactor authors to add
        # conditional behaviors for local usage or testing purposes
        self.local = parse_boolean(os.environ.get('LOCALONLY'))

        # Set up Tapis API subclient(s)
        if self.client is not None:
            self.pemagent = agaveutils.recursive.PemAgent(self.client)
        else:
            self.pemagent = None

        # Load settings dict from env and file via tacconfig
        # Result: config.yml ** local environment
        self.settings = read_config(update=True, env=True, namespace=namespace)

        # Initialize logging
        self.loggers = AttrDict({
            'screen': None,
            'slack': None,
            "loggly": None
        })

        # Fields to send with each structured log response
        log_fields = {
            'agent': self.uid,
            'task': self.execid,
            'name': self.actor_name,
            'username': self.username,
            'session': self.session,
            'resource': self.container_repo,
            'subtask': self.workerid,
            'host_ip': get_host_ip()
        }

        # Build a list of strings to redact from logs
        #
        # This includes user-defined strings, variables passed to override
        # config.yml, and current Tapis secrets
        redact_strings = get_redaction_strings(redactions=redactions,
                                               agave_client=self.client,
                                               namespace=namespace)

        # Posts to these locations depending on configuration
        # STDERR - Always
        # FILE   - If log_file is provided
        # AGGREGATOR - If log_token is provided
        self.loggers.screen = logtypes.get_screen_logger(
            self.uid,
            self.execid,
            settings=self.settings,
            redactions=redact_strings,
            fields=log_fields)

        # Post plaintext logs to Slack (if configured with a webhook)
        self.loggers.slack = logtypes.get_slack_logger(
            self.uid,
            'slack',
            settings=self.settings,
            redactions=redact_strings)

        # Post logs to Loggly
        self.loggers.loggly = logtypes.get_loggly_logger(
            self.uid,
            'loggly',
            settings=self.settings,
            redactions=redact_strings,
            fields=log_fields)

        # Alias that allows r.logger to continue working
        self.logger = self.loggers.screen
Example #43
                torch.save(states, os.path.join(cfg.save_path, 'model.ckpt'))
                print('***************save model, when f1 = %.6f and iters = %d.***************' % (most_f1, most_iters))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cfg', type=str, default='standard.yaml', help='path to config file')
    parser.add_argument('-m', '--mode', type=str, default='train', help='run mode (default: train)')
    args = parser.parse_args()

    cfg_file = args.cfg + '.yaml'
    print('cfg_file: ' + cfg_file)
    print('mode: ' + args.mode)

    with open('./config/' + cfg_file, 'r') as f:
        cfg = AttrDict(yaml.safe_load(f))

    timeArray = time.localtime()
    time_stamp = time.strftime('%Y-%m-%d--%H-%M-%S', timeArray)
    print('time stamp:', time_stamp)

    cfg.path = cfg_file
    cfg.time = time_stamp

    if cfg.TRAIN.track == 'simple':
        from data_simple import Provider
    else:
        # from data_complx import Provider
        from data_complx_data1 import Provider

    if args.mode == 'train':
Example #44
    options.model_augmented = True

    return options


###############################################################################
#                                   Driver                                    #
###############################################################################
if __name__ == "__main__":

    #=== Hyperparameters ===#
    with open('../config_files/hyperparameters_vae.yaml') as f:
        hyperp = yaml.safe_load(f)
    if len(sys.argv) > 1:  # if run from scheduler
        hyperp = command_line_json_string_to_dict(sys.argv[1], hyperp)
    hyperp = AttrDict(hyperp)

    #=== Options ===#
    with open('../config_files/options_vae.yaml') as f:
        options = yaml.safe_load(f)
    options = AttrDict(options)
    options = add_options(options)
    options.posterior_diagonal_covariance = True

    #=== File Names ===#
    project_paths = FilePathsProject(options)
    filepaths = FilePathsPredictionAndPlotting(hyperp, options, project_paths)

    #=== Predict and Save ===#
    predict_and_plot(hyperp, options, filepaths)
Example #45
def main(cli_args):
    # Read from config file and make args
    config_filename = "{}.json".format(cli_args.taxonomy)
    with open(os.path.join("config", config_filename)) as f:
        args = AttrDict(json.load(f))
    logger.info("Training/evaluation parameters {}".format(args))

    args.output_dir = os.path.join(args.ckpt_dir, args.output_dir)

    init_logger()
    set_seed(args)

    processor = GoEmotionsProcessor(args)
    label_list = processor.get_labels()

    config = BertConfig.from_pretrained(
        args.model_name_or_path,
        num_labels=len(label_list),
        finetuning_task=args.task,
        id2label={str(i): label
                  for i, label in enumerate(label_list)},
        label2id={label: i
                  for i, label in enumerate(label_list)})
    tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name_or_path)
    model = BertForMultiLabelClassification.from_pretrained(
        args.model_name_or_path, config=config)

    # GPU or CPU
    args.device = "cuda" if torch.cuda.is_available(
    ) and not args.no_cuda else "cpu"
    model.to(args.device)

    # Load dataset
    train_dataset = load_and_cache_examples(
        args, tokenizer, mode="train") if args.train_file else None
    dev_dataset = load_and_cache_examples(
        args, tokenizer, mode="dev") if args.dev_file else None
    test_dataset = load_and_cache_examples(
        args, tokenizer, mode="test") if args.test_file else None

    if dev_dataset is None:
        args.evaluate_test_during_training = True  # If there is no dev dataset, only use test dataset

    if args.do_train:
        global_step, tr_loss = train(args, model, tokenizer, train_dataset,
                                     dev_dataset, test_dataset)
        logger.info(" global_step = {}, average loss = {}".format(
            global_step, tr_loss))

    results = {}
    if args.do_eval:
        checkpoints = list(
            os.path.dirname(c) for c in sorted(
                glob.glob(args.output_dir + "/**/" + "pytorch_model.bin",
                          recursive=True)))
        if not args.eval_all_checkpoints:
            checkpoints = checkpoints[-1:]
        else:
            logging.getLogger("transformers.configuration_utils").setLevel(
                logging.WARN)  # Reduce logging
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1]
            model = BertForMultiLabelClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args,
                              model,
                              test_dataset,
                              mode="test",
                              global_step=global_step)
            result = dict(
                (k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as f_w:
            for key in sorted(results.keys()):
                f_w.write("{} = {}\n".format(key, str(results[key])))
Example #46
from attrdict import AttrDict

configs = {
    "alpha_sr": 0.1,
    "alpha_ri": 0.1,
    "alpha_rs": 0.1,
    "p_rd": 0.1,
    "num_aug": 4
}
configs = AttrDict(configs)
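
# A minimal usage sketch (not part of the original snippet): once wrapped in
# an AttrDict, the augmentation settings can be read as attributes or as keys
# interchangeably.
assert configs.num_aug == configs["num_aug"] == 4
print(configs.alpha_sr)  # -> 0.1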
Example #47
def main(cli_args):
    args = AttrDict(vars(cli_args))  # vars() turns an argparse Namespace into the mapping AttrDict expects
    logger.info("Training/evaluation parameters {}".format(args))

    args.output_dir = os.path.join(args.ckpt_dir, args.task)

    set_seed(args)

    output_mode = "classification"
    if "nsmc" in args.train_file:
        processor = NSMCProcessor(args)
    elif "kornli" in args.train_file:
        processor = KorNLIProcessor(args)
    elif "paws" in args.train_file:
        processor = PawsProcessor(args)
    elif "korsts" in args.train_file:
        processor = KorSTSProcessor(args)
        output_mode = "regression"
    elif "question-pair" in args.train_file:
        processor = QuestionPairProcessor(args)
    elif "hate-speech" in args.train_file:
        processor = HateSpeechProcessor(args)
    elif "naver-ner" in args.train_file:
        processor = NaverNerProcessor(args)
    else:
        processor = IntentProcessor(args)
    args["output_mode"] = output_mode
    labels = processor.get_labels()

    config = ElectraConfig.from_pretrained(
        args.model_name_or_path,
        num_labels=len(labels),
        id2label={str(i): label for i, label in enumerate(labels)},
        label2id={label: i for i, label in enumerate(labels)},
    )
    if args.mecab:
        tokenizer = KoNLPyBertTokenizer(
            konlpy_wordpiece=KoNLPyWordPieceTokenizer(Mecab(), use_tag=False),
            vocab_file=os.path.join(args.model_name_or_path, "vocab.txt"),
            do_lower_case=args.do_lower_case,
        )
    else:
        tokenizer = ElectraTokenizer.from_pretrained(
            args.model_name_or_path, do_lower_case=args.do_lower_case
        )

    if "naver-ner" in args.train_file:
        model = ElectraForTokenClassification.from_pretrained(
            args.model_name_or_path, config=config
        )
    else:
        model = ElectraForSequenceClassification.from_pretrained(
            args.model_name_or_path, config=config
        )

    # Re-initialize the top encoder layer(s)
    if args.do_reinit:
        init_layer(model.electra.encoder.layer, top_n_layer=1)
    
    # GPU or CPU
    args.device = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
    model.to(args.device)

    # Load dataset
    if "naver-ner" in args.train_file:
        train_dataset = (
            ner_load_and_cache_examples(args, tokenizer, mode="train")
            if args.train_file
            else None
        )
        dev_dataset = (
            ner_load_and_cache_examples(args, tokenizer, mode="dev")
            if args.dev_file
            else None
        )
        test_dataset = (
            ner_load_and_cache_examples(args, tokenizer, mode="test")
            if args.test_file
            else None
        )
    else:
        train_dataset = (
            seq_cls_load_and_cache_examples(args, tokenizer, mode="train")
            if args.train_file
            else None
        )
        dev_dataset = (
            seq_cls_load_and_cache_examples(args, tokenizer, mode="dev")
            if args.dev_file
            else None
        )
        test_dataset = (
            seq_cls_load_and_cache_examples(args, tokenizer, mode="test")
            if args.test_file
            else None
        )

    if dev_dataset is None:
        args.evaluate_test_during_training = True  # If there is no dev dataset, evaluate on the test set during training

    if args.do_train:
        global_step, tr_loss = train(
            args, model, labels, train_dataset, dev_dataset, test_dataset
        )
        logger.info(" global_step = {}, average loss = {}".format(global_step, tr_loss))

    if args.do_eval and not args.do_nni:
        results = {}
        checkpoints = list(
            os.path.dirname(c)
            for c in sorted(
                glob.glob(
                    args.output_dir + "/**/" + "pytorch_model.bin", recursive=True
                )
            )
        )
        if not args.eval_all_checkpoints:
            checkpoints = checkpoints[-1:]
        else:
            logging.getLogger("transformers.configuration_utils").setLevel(
                logging.WARN
            )  # Reduce logging
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN
            )  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1]
            if "naver-ner" in args.train_file:
                model = ElectraForTokenClassification.from_pretrained(checkpoint)
            else:
                model = ElectraForSequenceClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(
                args,
                model,
                test_dataset,
                mode="test",
                labels=labels,
                global_step=global_step,
            )
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as f_w:
            for key in sorted(results.keys()):
                f_w.write("{} = {}\n".format(key, str(results[key])))
Example #48
    # hyperp_of_interest_dict['activation'] = Categorical(['relu', 'elu', 'sigmoid', 'tanh'], name='activation')
    hyperp_of_interest_dict['penalty_js'] = Real(0, 1, name='penalty_js')
    #hyperp_of_interest_dict['batch_size'] = Integer(100, 500, name='batch_size')

    #####################
    #   Initial Setup   #
    #####################
    #=== Generate skopt 'space' list ===#
    space = []
    for key, val in hyperp_of_interest_dict.items():
        space.append(val)

    #=== Hyperparameters ===#
    with open('../config_files/hyperparameters_vae.yaml') as f:
        hyperp = yaml.safe_load(f)
    hyperp = AttrDict(hyperp)

    #=== Options ===#
    with open('../config_files/options_vae.yaml') as f:
        options = yaml.safe_load(f)
    options = AttrDict(options)
    options = add_options(options)
    options.model_aware = True
    options.model_augmented = False
    options.posterior_diagonal_covariance = True

    #=== File Paths ===#
    project_paths = FilePathsProject(options)
    filepaths = FilePathsHyperparameterOptimization(hyperp, options, project_paths)

    #=== Data and Prior Dictionary ===#
Example #49
class OneSignalPushNotificationBackend(PushNotificationBackend):

    config = AttrDict({
        'APP_ID': None,
        'API_KEY': None,
        'LANGUAGE': None,
        'TIMEOUT': 5,  # 5s
    })
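
    # Note: `config` holds the backend's default settings; publish_message
    # below reads them via attribute access (self.config.APP_ID,
    # self.config.API_KEY, self.config.TIMEOUT, self.config.LANGUAGE).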

    def _is_result_partial_error(self, result):
        return not result.is_error and result.errors

    def _is_invalid_result(self, result):
        return result.is_error or self._is_result_partial_error(result)

    def publish_message(self, message):
        onesignal_client = OneSignalClient(self.config.APP_ID,
                                           self.config.API_KEY)
        onesignal_client.session = generate_session(
            slug='pymess - OneSignal',
            related_objects=(message, ),
            timeout=self.config.TIMEOUT)

        languages = {'en'}
        if self.config.LANGUAGE is not None:
            languages.add(self.config.LANGUAGE)
        notification = DeviceNotification(
            include_external_user_ids=(message.recipient, ),
            contents={language: message.content
                      for language in languages},
            headings={language: message.heading
                      for language in languages},
            data=message.extra_data,
            url=message.url,
            ios_badge_type=DeviceNotification.IOS_BADGE_TYPE_INCREASE,
            ios_badge_count=1,
        )

        try:
            result = onesignal_client.send(notification)

            extra_sender_data = message.extra_sender_data or {}
            extra_sender_data['result'] = result.body

            if self._is_invalid_result(result):
                self._update_message_after_sending_error(
                    message,
                    state=PushNotificationMessage.STATE.ERROR,
                    error=str(result.errors),
                    extra_sender_data=extra_sender_data,
                )
            else:
                self._update_message_after_sending(
                    message,
                    state=PushNotificationMessage.STATE.SENT,
                    sent_at=timezone.now(),
                    extra_sender_data=extra_sender_data,
                )
        except (JSONDecodeError, requests.exceptions.RequestException,
                OneSignalAPIError) as ex:
            self._update_message_after_sending_error(message, error=str(ex))
Example #50
    def __init__(self,
                 CaseStudy=0,
                 seq=3,
                 ndraw=10000,
                 thin=1,
                 nCR=3,
                 DEpairs=3,
                 parallelUpdate=0.9,
                 pCR=True,
                 k=10,
                 pJumpRate_one=0.2,
                 steps=100,
                 savemodout=False,
                 saveout=True,
                 save_tmp_out=True,
                 Prior='LHS',
                 DoParallel=True,
                 eps=5e-2,
                 BoundHandling='Reflect',
                 lik_sigma_est=False,
                 parallel_jobs=4,
                 jr_scale=1.0,
                 rng_seed=123):

        self.CaseStudy = CaseStudy
        MCMCPar.seq = seq
        MCMCPar.ndraw = ndraw
        MCMCPar.thin = thin
        MCMCPar.nCR = nCR
        MCMCPar.DEpairs = DEpairs
        MCMCPar.parallelUpdate = parallelUpdate
        MCMCPar.Do_pCR = pCR
        MCMCPar.k = k
        MCMCPar.pJumpRate_one = pJumpRate_one
        MCMCPar.steps = steps
        MCMCPar.savemodout = savemodout
        MCMCPar.saveout = saveout
        MCMCPar.save_tmp_out = save_tmp_out
        MCMCPar.Prior = Prior
        MCMCPar.DoParallel = DoParallel
        MCMCPar.eps = eps
        MCMCPar.BoundHandling = BoundHandling
        MCMCPar.jr_scale = jr_scale
        MCMCPar.lik_sigma_est = lik_sigma_est
        Extra.n_jobs = parallel_jobs

        np.random.seed(rng_seed)
        MCMCPar.rng_seed = rng_seed

        # Set the deep model generator
        if self.CaseStudy in (2, 3):
            self.ndim = 50
            MCMCPar.n = self.ndim
            if self.CaseStudy == 3:
                weight_file = '/home/elaloy/VAE_MCMC/saved_models/3d_ti/vaecnn3d_100ep_con.pkl'
                from vaecnn_gen_3Dmodel_func import buildnet
            elif self.CaseStudy == 2:
                weight_file = '/home/elaloy/VAE_MCMC/saved_models/2d_ti/vaecnn2d_100ep_unc.pkl'
                from vaecnn_gen_2Dmodel_func import buildnet
            DNN = AttrDict()
            DNN.ndr = self.ndim
            DNN.ae_encode_mu, DNN.ae_encode_log_sigma, DNN.noise_adjust, DNN.gen_from_noise, DNN.gen_from_enc = buildnet(
                weight_file, DNN.ndr)
            DNN.threshold = True
            DNN.nloop = 10
            self.DNN = DNN

            # Set some remaining MCMC parameters
            MCMCPar.lb = np.zeros((1, MCMCPar.n)) - 5
            MCMCPar.ub = np.zeros((1, MCMCPar.n)) + 5
            MCMCPar.m0 = 20 * MCMCPar.n

        # Set the inverse problem
        if self.CaseStudy == 3:
            ModelName = 'forward_model_flow'
            MCMCPar.lik = 2
            Extra.SimType = 3  # Transient 3D hydraulic tomography
            tmp_meas = sio.loadmat('MeasurementData_3DCaseStudy.mat')
            # Load true 3D K field - for reference only
            Extra.trueK = np.swapaxes(
                np.reshape(np.loadtxt('trueK_3D.txt'), (30, 32, 27),
                           order="F"), 0, 2)
            Extra.idx = tmp_meas['iim'].T
            Measurement.MeasData = tmp_meas['simc'].T
            Measurement.N = 1568
            Extra.cdt = tmp_meas['cdt']
            del tmp_meas
            MCMCPar.AdaptSigma = True
            if MCMCPar.AdaptSigma:
                Measurement.Sigma0 = 0.1
                Measurement.Sigma1 = 0.02
                Measurement.Sigma = np.array(Measurement.Sigma0)
            else:
                Measurement.Sigma = 0.02

        elif self.CaseStudy == 2:
            Measurement.N = 49
            ModelName = 'forward_model_flow'
            MCMCPar.lik = 2
            Extra.SimType = 1  # 2D steady-state flow
            # Load true 2D K field - for reference only
            Extra.trueK = np.loadtxt('trueK_2D.txt')
            Extra.idx = np.array([[20, 20], [35, 35], [65, 65], [80, 80],
                                  [20, 80], [80, 20], [35, 65], [65, 35],
                                  [51, 51], [51, 65], [51, 35], [35, 51],
                                  [65, 51], [20, 35], [20, 51], [20, 65],
                                  [35, 20], [51, 20], [65, 20], [80, 35],
                                  [80, 51], [80, 65], [35, 80], [51, 80],
                                  [65, 80], [5, 5], [5, 20], [5, 35], [5, 51],
                                  [5, 65], [5, 80], [5, 95], [20, 5], [35, 5],
                                  [51, 5], [65, 5], [80, 5], [95, 5], [95, 20],
                                  [95, 35], [95, 51], [95, 65], [95, 80],
                                  [95, 95], [20, 95], [35, 95], [51, 95],
                                  [65, 95], [80, 95]])

            with open('MeasurementData_2DCaseStudy' + '.pkl', 'rb') as f:
                tmp_meas = pickle.load(f)
            Measurement.MeasData = tmp_meas['meas_data']

            MCMCPar.AdaptSigma = True
            if MCMCPar.AdaptSigma:
                Measurement.Sigma0 = 0.10
                Measurement.Sigma1 = 0.02
                Measurement.Sigma = np.array(Measurement.Sigma0)
            else:
                Measurement.Sigma = 0.02

        elif self.CaseStudy == 1:
            # A theoretical 10-dimensional bimodal distribution made of 2 Gaussians
            # (example 3 in Matlab DREAMzs code)
            self.ndim = 10
            MCMCPar.n = self.ndim
            MCMCPar.Prior = 'COV'
            MCMCPar.lb = np.zeros((1, MCMCPar.n)) - 100
            MCMCPar.ub = np.zeros((1, MCMCPar.n)) + 100
            MCMCPar.BoundHandling = None
            MCMCPar.m0 = 10 * MCMCPar.n
            Measurement.N = 1
            ModelName = 'theoretical_case_bimodal_mvn'
            MCMCPar.lik = 1
            Extra.cov1 = np.eye(MCMCPar.n)
            Extra.cov2 = np.eye(MCMCPar.n)
            Extra.mu1 = np.zeros((MCMCPar.n)) - 5
            Extra.mu2 = np.zeros((MCMCPar.n)) + 5

        elif self.CaseStudy == 0:
            # A theoretical multivariate normal distribution with 100 correlated dimensions
            # (example 2 in Matlab DREAM code)
            self.ndim = 100
            MCMCPar.n = self.ndim
            MCMCPar.Prior = 'LHS'
            MCMCPar.lb = np.zeros((1, MCMCPar.n)) - 5
            MCMCPar.ub = np.zeros((1, MCMCPar.n)) + 15
            MCMCPar.BoundHandling = 'Reflect'
            MCMCPar.m0 = 10 * MCMCPar.n
            Measurement.N = 1
            ModelName = 'theoretical_case_mvn'
            MCMCPar.lik = 0

            A = 0.5 * np.eye(MCMCPar.n) + 0.5 * np.ones(MCMCPar.n)
            cov = np.zeros((MCMCPar.n, MCMCPar.n))
            # Rescale to variance-covariance matrix of interest
            for i in range(0, MCMCPar.n):
                for j in range(0, MCMCPar.n):
                    cov[i, j] = A[i, j] * np.sqrt((i + 1) * (j + 1))
            Extra.C = cov
            Extra.invC = np.linalg.inv(cov)

        else:  # This should not happen and is thus probably not needed
            self.ndim = 1
            MCMCPar.n = self.ndim
            MCMCPar.lb = np.zeros((1, MCMCPar.n))
            MCMCPar.ub = np.zeros((1, MCMCPar.n)) + 1
            MCMCPar.BoundHandling = None
            Measurement.N = 1
            ModelName = None
            MCMCPar.lik = 1
            MCMCPar.m0 = 10 * MCMCPar.n

        self.MCMCPar = MCMCPar
        self.Measurement = Measurement
        self.Extra = Extra
        self.ModelName = ModelName
Example #51
SOLUTION_CONFIG = AttrDict({
    'env': {'experiment_dir': PARAMS.experiment_dir},
    'execution': GLOBAL_CONFIG,
    'xy_splitter': {
        'unet': {'x_columns': X_COLUMNS,
                 'y_columns': Y_COLUMNS,
                 },
    },
    'reader': {
        'unet': {'x_columns': X_COLUMNS,
                 'y_columns': Y_COLUMNS,
                 },
    },
    'loader': {'dataset_params': {'h': PARAMS.image_h,
                                  'w': PARAMS.image_w,
                                  'pad_method': PARAMS.pad_method,
                                  'image_source': 'disk',
                                  'divisor': 64,
                                  'target_format': PARAMS.target_format
                                  },
               'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
                                              'shuffle': True,
                                              'num_workers': PARAMS.num_workers,
                                              'pin_memory': PARAMS.pin_memory
                                              },
                                 'inference': {'batch_size': PARAMS.batch_size_inference,
                                               'shuffle': False,
                                               'num_workers': PARAMS.num_workers,
                                               'pin_memory': PARAMS.pin_memory
                                               },
                                 },
               },
    'model': {
        'unet': {
            'architecture_config': {'model_params': {'n_filters': PARAMS.n_filters,
                                                     'conv_kernel': PARAMS.conv_kernel,
                                                     'pool_kernel': PARAMS.pool_kernel,
                                                     'pool_stride': PARAMS.pool_stride,
                                                     'repeat_blocks': PARAMS.repeat_blocks,
                                                     'batch_norm': PARAMS.use_batch_norm,
                                                     'dropout': PARAMS.dropout_conv,
                                                     'in_channels': PARAMS.image_channels,
                                                     'out_channels': PARAMS.unet_output_channels,
                                                     'nr_outputs': PARAMS.nr_unet_outputs,
                                                     'encoder': PARAMS.encoder,
                                                     'activation': PARAMS.unet_activation,
                                                     'dice_weight': PARAMS.dice_weight,
                                                     'bce_weight': PARAMS.bce_weight,
                                                     },
                                    'optimizer_params': {'lr': PARAMS.lr,
                                                         },
                                    'regularizer_params': {'regularize': True,
                                                           'weight_decay_conv2d': PARAMS.l2_reg_conv,
                                                           },
                                    'weights_init': {'function': 'xavier',
                                                     },
                                    },
            'training_config': TRAINING_CONFIG,
            'callbacks_config': {'model_checkpoint': {
                'filepath': os.path.join(GLOBAL_CONFIG['exp_root'], 'checkpoints', 'unet', 'best.torch'),
                'epoch_every': 1,
                'metric_name': PARAMS.validation_metric_name,
                'minimize': PARAMS.minimize_validation_metric},
                'lr_scheduler': {'gamma': PARAMS.gamma,
                                 'epoch_every': 1},
                'training_monitor': {'batch_every': 0,
                                     'epoch_every': 1},
                'experiment_timing': {'batch_every': 0,
                                      'epoch_every': 1},
                'validation_monitor': {'epoch_every': 1,
                                       'data_dir': PARAMS.train_images_dir,
                                       'annotation_file': PARAMS.annotation_file,
                                       'loader_mode': PARAMS.loader_mode},
                'neptune_monitor': {'model_name': 'unet',
                                    'image_nr': 4,
                                    'image_resize': 0.2},
                'early_stopping': {'patience': PARAMS.patience,
                                   'metric_name': PARAMS.validation_metric_name,
                                   'minimize': PARAMS.minimize_validation_metric},
            }
        },
    },
    'tta_generator': {'flip_ud': False,
                      'flip_lr': True,
                      'rotation': False,
                      'color_shift_runs': 4},
    'tta_aggregator': {'method': PARAMS.tta_aggregation_method,
                       'nthreads': PARAMS.num_threads
                       },
    'thresholder': {'threshold_masks': PARAMS.threshold_masks,
                    },
})
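
# A minimal access sketch (illustrative, not from the original source):
# AttrDict wraps nested mappings recursively, so deeply nested settings can
# be read with dotted paths instead of chained indexing.
lr = SOLUTION_CONFIG.model.unet.architecture_config.optimizer_params.lr
assert lr == PARAMS.lr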
Example #52
import numpy as np
import numpy.matlib as matlib
import scipy.io as sio
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3

import time

from mcmc_func import *  # This imports both all Dream_zs and inverse problem-related functions

from attrdict import AttrDict

MCMCPar = AttrDict()

MCMCVar = AttrDict()

Measurement = AttrDict()

OutDiag = AttrDict()

Extra = AttrDict()
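
# Note: these empty module-level AttrDicts act as shared, mutable namespaces.
# Sampler.__init__ below populates them by attribute assignment (for example
# MCMCPar.seq = seq), and the MCMC routines can read the same values back
# either as attributes (MCMCPar.seq) or as keys (MCMCPar['seq']).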


class Sampler:
    def __init__(self,
                 CaseStudy=0,
                 seq=3,
                 ndraw=10000,
Example #53
config = AttrDict({
    "mass": 1.0,
    "control_multiplier": 1.0,
    "action_mode": [0.0, 1.0][1], # thrust control (0.0) / velocity control (1.0)
    "dust_storm": AttrDict({ # TODO: thicker dust storm not obvious
        "enable": 0.0, # float value other than 0.0 will enable
        "start_size_multiplier": 75.0 # particle size; larger value gives thicker dust storm
    }),
    "wind_zone": AttrDict({ # NOTE: remember to set freeze_position.x/y to false when enabling wind zone
        "enable": 1.0, # float value other than 0.0 will enable
        "force_low": AttrDict({ # random wind with force subject to uniform[low, high]
            "x": -40.0, 
            "y": 0.0, # y-axis wind force should always be zero as this is up-and-down direction
            "z": -50.0,
        }),
        "force_high": AttrDict({ # constant directional wind force over the object
            "x": 40.0, 
            "y": 0.0, # y-axis wind force should always be zero as this is up-and-down direction
            "z": 50.0,
        })
    }),
    "rigid_body": AttrDict({ 
        "freeze_position": AttrDict({ # float value other than 0.0 is true
            "x": 0.0,
            "y": 0.0, # never fix y translation as we are doing free fall
            "z": 0.0,
        }),
        "freeze_rotation": AttrDict({ # float value other than 0.0 is true
            "x": 0.0,
            "y": 0.0,
            "z": 0.0,
        })
    }),
    "cloud_shadow": AttrDict({
        "enable": 0.0, # float value other than 0.0 will enable
        "world_size": 300.0, # don't set to very small value otherwise cloud movement looks discrete
        "speed_multiplier": 5.0, # larger values give faster moving cloud (shadow)
        "coverage_modifier": 0.0, # -1.0 ~ 1.0, larger value gives larger coverage of shadow 
    }),
    "rotational_light": AttrDict({ # light rotates about x-axis within +-interval with fixed step size
        "enable": 0.0, # float value other than 0.0 will enable
        "interval": 10.0, # light rotate in the range of light_original_rotation +- interval
        "step": 1.0 # larger number gives faster rotating light source
    }),
    "position": AttrDict({ # starting position of the object
        "x": -120.9,
        "y": 27.4834, 
        "z": 792.7
    }),
    "landing_zone": AttrDict({
        "enable":  0.0, # float value other than 0.0 will enable
        "offset": AttrDict({
            "x": 31.5,
            "y": -47.4,
            "z": 23.5
        })
    })
})
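
# A minimal read sketch (illustrative): nested AttrDicts support dotted
# lookups all the way down, e.g. to check whether the wind zone is active
# before reading its force bounds.
if config.wind_zone.enable != 0.0:
    print(config.wind_zone.force_low.x, config.wind_zone.force_high.z)
print(config.position.y)  # -> 27.4834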
Example #54
def main():

    rx = Reactor()
    m = AttrDict(rx.context.message_dict)

    if m == {}:
        try:
            jsonmsg = json.loads(rx.context.raw_message)
            m = jsonmsg
        except Exception:
            pass

    #    ['event', 'agavejobs', 'create', 'delete']
    action = "emptypost"
    try:
        for a in ["aloejobs", "event", "agavejobs"]:
            try:
                rx.logger.info("Testing against {} schema".format(a))
                rx.validate_message(m,
                                    messageschema="/schemas/" + a +
                                    ".jsonschema",
                                    permissive=False)
                action = a
                break
            except Exception as exc:
                print("Validation error: {}".format(exc))
        if action is None:
            pprint(m)
            raise ValidationError("Message did not a known schema")
    except Exception as vexc:
        rx.on_failure("Failed to process message", vexc)

    # rx.logger.debug("SCHEMA DETECTED: {}".format(action))

    # store = PipelineJobStore(mongodb=rx.settings.mongodb)
    # Process the event

    # Get URL params from Abaco context
    #
    # These can be overridden by the event body or custom
    # code implemented to process the message. This has a
    # side effect of allowing the manager to process empty
    # POST bodies so long as the right values are presented
    # as URL params.
    #
    # cb_* variables are always overridden by the contents of
    #   the POST body
    #
    cb_event_name = rx.context.get("event", None)
    cb_job_uuid = rx.context.get("uuid", None)
    cb_token = rx.context.get("token", "null")
    # Accept a 'note' as a URL parameter
    # TODO - urldecode the contents of 'note'
    cb_note = rx.context.get("note", "Event had no JSON payload")
    # NOTE - contents of cb_data will be overridden in create, event, aloejob
    cb_data = {"note": cb_note}
    # Accept 'status', the Aloe-centric name for job.state
    # as well as 'state'
    cb_agave_status = rx.context.get("status", rx.context.get("state", None))

    # Prepare template PipelineJobsEvent
    event_dict = {
        "uuid": cb_job_uuid,
        "name": cb_event_name,
        "token": cb_token,
        "data": cb_data,
    }

    # This is the default message schema 'event'
    if action == "event":
        # Filter message and override values in event_dict with its contents
        for k in ["uuid", "name", "token", "data"]:
            event_dict[k] = m.get(k, event_dict.get(k))

    # AgaveJobs can update the status of an existing job but cannot
    # create one. To do so, an Agave job must be launched
    # using the PipelineJobsAgaveProxy resource.
    if action == "agavejobs":
        rx.on_failure("Agave job callbacks are no longer supported")
    elif action == "aloejobs":
        try:
            # Aloe jobs POST their current JSON representation to
            # callback URL targets. The POST body contains a 'status' key.
            # If for some reason it doesn't, job status is determined by
            # the 'state' or 'status' URL parameter.
            if cb_agave_status is None:
                cb_agave_status = m.get("status", None)
            # Agave job message bodies include 'id' which is the jobId
            mes_agave_job_id = m.get("id", None)
            rx.logger.debug("aloe_status: {}".format(cb_agave_status))
            if cb_agave_status is not None:
                cb_agave_status = cb_agave_status.upper()
        except Exception as exc:
            rx.on_failure(
                "Aloe callback POST and associated URL parameters were missing some required fields",
                exc,
            )

        # If the job status is 'RUNNING' then use a subset of the POST for
        # event.data. Otherwise, create an event.data from the most recent
        # entry in the Agave job history. One small detail to note is that
        # callbacks are sent at the beginning of event processing in the
        # Agave jobs service and so a handful of fields in the job record
        # that are late bound are not yet populated when the event is sent.
        if cb_agave_status == "RUNNING":
            cb_data = minify_job_dict(dict(m))
        else:
            cb_data = {"status": cb_agave_status}
            # Fetch latest history entry to put in event.data
            try:
                # Is there a better way than grabbing entire history that can
                # be implemented in a pure Agave call? Alternatively, we could
                # cache last offset for this job in rx.state but that will
                # limit our scaling to one worker
                #
                agave_job_latest_history = rx.client.jobs.getHistory(
                    jobId=mes_agave_job_id,
                    limit=100)[-1].get("description", None)
                if agave_job_latest_history is not None:
                    cb_data["description"] = agave_job_latest_history
            except Exception as agexc:
                rx.logger.warning("Failed to get history for {}: {}".format(
                    mes_agave_job_id, agexc))

        # Map the Agave job status to an PipelineJobsEvent name
        if cb_event_name is None and cb_agave_status is not None:
            cb_event_name = AgaveEvents.agavejobs.get(cb_agave_status,
                                                      "update")
            rx.logger.debug("Status: {} => Event: {}".format(
                cb_agave_status, cb_event_name))

        # Event name and data can be updated as part of processing an Agave POST
        # so apply the current values to event_dict here
        event_dict["name"] = cb_event_name
        event_dict["data"] = cb_data

    # Sanity check event_dict and token
    if event_dict["uuid"] is None or event_dict[
            "name"] is None or cb_token is None:
        rx.on_failure("No actionable event was received.")

    # Instantiate a job instance to leverage the MPJ framework
    store = ManagedPipelineJobInstance(rx.settings.mongodb,
                                       event_dict["uuid"],
                                       agave=rx.client)

    # Handle event...
    try:

        # First, proxy events. This code forwards index and indexed events to the jobs-indexer
        # Proxy 'index'
        if event_dict["name"] == "index":
            rx.logger.info("Forwarding 'index'")
            index_mes = {
                "name": "index",
                "uuid": event_dict["uuid"],
                "token": event_dict["token"],
            }
            rx.send_message(rx.settings.pipelines.job_indexer_id,
                            index_mes,
                            retryMaxAttempts=10)
            # Disable this since it should be picked up via events-manager subscription
            # message_control_annotator(up_job, ["INDEXING"], rx)

        # Proxy 'indexed'
        elif event_dict["name"] == "indexed":
            rx.logger.info("Forwarding 'indexed'")
            index_mes = {
                "name": "indexed",
                "uuid": event_dict["uuid"],
                "token": event_dict["token"],
            }
            rx.send_message(rx.settings.pipelines.job_indexer_id,
                            index_mes,
                            retryMaxAttempts=10)
            # Disable this since it should be picked up via events-manager subscription
            # message_control_annotator(up_job, ["FINISHED"], rx)

        # Handle all other events
        else:
            rx.logger.info("Handling '{}'".format(event_dict["name"]))
            # Get the current state of the MPJ. We use this to detect if
            # handling the event has resulted in a change of state
            store_state = store.state
            last_event = store.last_event

            # Send event at the beginning of state change so subscribers can pick
            # up, for instance, a case where the job receives an index event and
            # is in the FINISHED state.
            if rx.settings.state_enter:
                forward_event(event_dict["uuid"], event_dict['name'],
                              store_state, {'last_event': last_event}, rx)

            up_job = store.handle(event_dict, cb_token)
            if rx.settings.state_exit:
                forward_event(up_job["uuid"], event_dict['name'],
                              up_job["state"],
                              {"last_event": up_job["last_event"]}, rx)

    except Exception as exc:
        rx.on_failure("Event not processed", exc)

    rx.on_success("Processed event in {} usec".format(rx.elapsed()))
Example #55
def make_attrdict(*args, local_dict=None):
    assert local_dict is not None and isinstance(local_dict, dict)
    if isinstance(args[0], list):
        args = args[0]
    return AttrDict({k: local_dict[k] for k in args})
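
# A minimal usage sketch (the names x and y are illustrative): pick selected
# local variables into an AttrDict, passing them either as varargs or as a
# single list.
x, y = 1, 2
d = make_attrdict('x', 'y', local_dict=locals())
assert d.x == 1 and d['y'] == 2
d = make_attrdict(['x'], local_dict={'x': 10})
assert d.x == 10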
Example #56
def _todict(row):
    return AttrDict(
        dict(zip([x[0].lower() for x in row.cursor_description], row)))
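
# A hedged usage sketch: FakeRow below stands in for a pyodbc-style row,
# which exposes column metadata via `cursor_description` and is itself
# iterable over the column values.
class FakeRow(list):
    cursor_description = [('ID',), ('NAME',)]

rec = _todict(FakeRow([7, 'alice']))
assert rec.id == 7 and rec['name'] == 'alice'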
Example #57
                                    decoding_lib=args.decoding_lib,
                                    use_fp16_decoding=args.use_fp16_decoding)

    # Set evaluate mode
    transformer.eval()

    enc_output, mem_seq_len = generate_encoder_result(
        args.infer_batch_size, args.max_length, args.d_model,
        "float16" if args.use_fp16_decoding else "float32")
    with paddle.no_grad():
        for i in range(100):
            # For warmup.
            if i == 50:
                start = time.time()
            transformer.decoding(enc_output=enc_output,
                                 memory_seq_lens=mem_seq_len)
        logger.info("Average test time for decoding is %f ms" %
                    ((time.time() - start) / 50 * 1000))


if __name__ == "__main__":
    ARGS = parse_args()
    yaml_file = ARGS.config
    with open(yaml_file, 'rt') as f:
        args = AttrDict(yaml.safe_load(f))
        pprint(args)
    args.decoding_lib = ARGS.decoding_lib
    args.use_fp16_decoding = ARGS.use_fp16_decoding

    do_predict(args)
Example #58
    options.model_augmented = False

    return options


###############################################################################
#                                   Driver                                    #
###############################################################################
if __name__ == "__main__":

    #=== Hyperparameters ===#
    with open('../config_files/hyperparameters_vaeiaf.yaml') as f:
        hyperp = yaml.safe_load(f)
    if len(sys.argv) > 1:  # if run from scheduler
        hyperp = command_line_json_string_to_dict(sys.argv[1], hyperp)
    hyperp = AttrDict(hyperp)

    #=== Options ===#
    with open('../config_files/options_vaeiaf.yaml') as f:
        options = yaml.safe_load(f)
    options = AttrDict(options)
    options = add_options(options)
    options.posterior_iaf = True

    #=== File Names ===#
    project_paths = FilePathsProject(options)
    filepaths = FilePathsPredictionAndPlotting(hyperp, options, project_paths)

    #=== Predict and Save ===#
    predict_and_plot(hyperp, options, filepaths)
Example #59
    image_save = "./FASHION_MINST_images/" + str(index)
    dload_path = Path(dload)
    plots = Path(dload + "/plots")
    if not Path.exists(dload_path):
        Path.mkdir(dload_path)
    if not Path.exists(plots):
        Path.mkdir(plots)

    torch.manual_seed(0)

    # Hyper parameters
    cwd = Path.cwd()
    with open(str(cwd / "config.yml")) as handle:
        config = yaml.load(handle, Loader=yaml.FullLoader)
        config_dict = config.copy()
        config = AttrDict(config)

    with open(str(dload + "/" + "config.yml"), "w") as handle:
        yaml.dump(config_dict, handle)

    # Load data and preprocess
    trainset = torchvision.datasets.FashionMNIST(root='./data_2',
                                                 train=True,
                                                 download=True,
                                                 transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=4)
    testset = torchvision.datasets.FashionMNIST(root='./data_2',
                                                train=False,
Example #60
def train(args, model):
    if not osp.isdir(args.root):
        os.makedirs(args.root)

    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)

    train_ds = EMNIST(train=True, class_range=args.class_range)
    eval_ds = EMNIST(train=False, class_range=args.class_range)
    train_loader = torch.utils.data.DataLoader(
        train_ds,
        batch_size=args.train_batch_size,
        shuffle=True,
        num_workers=4)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=len(train_loader) * args.num_epochs)

    if args.resume:
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(
            args.root, 'train_{}.log'.format(time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1

    logger = get_logger(logfilename)
    ravg = RunningAverage()

    if not args.resume:
        logger.info('Total number of parameters: {}\n'.format(
            sum(p.numel() for p in model.parameters())))

    for epoch in range(start_epoch, args.num_epochs + 1):
        model.train()
        for (x, _) in tqdm(train_loader):
            batch = img_to_task(x,
                                max_num_points=args.max_num_points,
                                device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()

            for key, val in outs.items():
                ravg.update(key, val)

        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f'lr {optimizer.param_groups[0]["lr"]:.3e} '
        line += ravg.info()
        logger.info(line)

        if epoch % args.eval_freq == 0:
            logger.info(eval(args, model) + '\n')

        ravg.reset()

        if epoch % args.save_freq == 0 or epoch == args.num_epochs:
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            ckpt.epoch = epoch + 1
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))

    args.mode = 'eval'
    eval(args, model)
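
# Note on the checkpoint format: because `ckpt` is an AttrDict, the object
# restored by torch.load in the resume branch above supports the same
# attribute reads used when it was built (ckpt.model, ckpt.optimizer,
# ckpt.scheduler, ckpt.logfilename, ckpt.epoch).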