Example #1
    def people(self):
        """ Returns the people linked to this content or None.

        The context specific function is temporarily stored on the
        ``context_specific_function`` attribute on each object in the
        resulting list.

        """

        if not self.content.get('people'):
            return None

        people = OrderedDict(self.content['people'])

        query = PersonCollection(object_session(self)).query()
        query = query.filter(Person.id.in_(people.keys()))

        result = []

        for person in query.all():
            person.context_specific_function = people[person.id.hex]
            result.append(person)

        order = list(people.keys())
        result.sort(key=lambda p: order.index(p.id.hex))

        return result
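A side note on the ordering step above, shown standalone (the ids and roles below are made up, not from the source):

from collections import OrderedDict

people = OrderedDict([('id-2', 'president'), ('id-1', 'member')])
query_result = ['id-1', 'id-2']            # rows come back from the database in arbitrary order

order = list(people.keys())
query_result.sort(key=lambda hex_id: order.index(hex_id))
assert query_result == ['id-2', 'id-1']    # restored to the order stored in content['people']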
 def git_list(args, script_path):
     with open(script_path + "/resources/sources/ariane.community.git.repos-SNAPSHOT.json") as repos_file:
         ariane_git_repos = OrderedDict(sorted(json.load(repos_file).items(),
                                               key=lambda t: t[0]))
     if args.addon:
         print("\nExisting Ariane addon git repositories :\n")
         print('{:40} {:110}'.format("Ariane git repository name", "Ariane git repository URL"))
         print('{:40} {:110}'.format("--------------------------", "-------------------------"))
         for key in ariane_git_repos.keys():
             git_repo = ariane_git_repos[key]
             if git_repo['type'] == "addon":
                 print('{:40} {:110}'.format(key, git_repo['url']))
     elif args.core:
         print("\nExisting Ariane core git repositories :\n")
         print('{:40} {:110}'.format("Ariane git repository name", "Ariane git repository URL"))
         print('{:40} {:110}'.format("--------------------------", "-------------------------"))
         for key in ariane_git_repos.keys():
             git_repo = ariane_git_repos[key]
             if git_repo['type'] == "core":
                 print('{:40} {:110}'.format(key, git_repo['url']))
     else:
         print("\nExisting Ariane git repositories :\n")
         print('{:40} {:110} {:25}'.format("Ariane git repository name", "Ariane git repository URL",
                                           "Ariane git repository type"))
         print('{:40} {:110} {:25}'.format("--------------------------", "-------------------------",
                                           "--------------------------"))
         for key in ariane_git_repos.keys():
             git_repo = ariane_git_repos[key]
             print('{:40} {:110} {:25}'.format(key, git_repo['url'], git_repo['type']))
def test():
    from collections import OrderedDict as StdlibOrderedDict

    ordered_dict = OrderedDict(((1, 'a'), (2, 'b'), (3, 'c')))
    stdlib_ordered_dict = StdlibOrderedDict(((1, 'a'), (2, 'b'), (3, 'c')))
    
    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()
    
    ordered_dict.move_to_end(1)
    
    assert ordered_dict != stdlib_ordered_dict
    #assert stdlib_ordered_dict != ordered_dict
    assert ordered_dict.items() != stdlib_ordered_dict.items()
    assert ordered_dict.keys() != stdlib_ordered_dict.keys()
    assert ordered_dict.values() != stdlib_ordered_dict.values()
    
    del stdlib_ordered_dict[1]
    stdlib_ordered_dict[1] = 'a'
    
    assert ordered_dict == stdlib_ordered_dict
    assert stdlib_ordered_dict == ordered_dict
    assert ordered_dict.items() == stdlib_ordered_dict.items()
    assert ordered_dict.keys() == stdlib_ordered_dict.keys()
    assert ordered_dict.values() == stdlib_ordered_dict.values()
    
    assert ordered_dict == OrderedDict(stdlib_ordered_dict) == \
                                                            stdlib_ordered_dict
    assert ordered_dict == StdlibOrderedDict(ordered_dict) == \
                                                            stdlib_ordered_dict
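The asserts above rely on OrderedDict equality being order sensitive, unlike plain dicts; a minimal standalone illustration:

from collections import OrderedDict

a = OrderedDict([(1, 'a'), (2, 'b')])
b = OrderedDict([(2, 'b'), (1, 'a')])
assert a != b                # OrderedDict comparison takes insertion order into account
assert dict(a) == dict(b)    # plain dicts only compare contents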
    
def drawing_lines(start_date, end_date):

    (positive_dict, negative_dict, neutral_dict) = get_sentiment_dates(start_date, end_date)
    positive_dict = OrderedDict(sorted(positive_dict.items(), key=lambda t: t[0]))
    pos_keys = list(positive_dict.keys())[-30:]    # get the last 30 days
    pos_vals = list(positive_dict.values())[-30:]  # keep the values aligned with the keys

    negative_dict = OrderedDict(sorted(negative_dict.items(), key=lambda t: t[0]))
    neg_keys = list(negative_dict.keys())[-30:]    # get the last 30 days
    neg_vals = list(negative_dict.values())[-30:]

    neutral_dict = OrderedDict(sorted(neutral_dict.items(), key=lambda t: t[0]))
    neu_keys = list(neutral_dict.keys())[-30:]     # get the last 30 days
    neu_vals = list(neutral_dict.values())[-30:]

    figure_title = 'Sentiment between ' + start_date + ' and ' + end_date
    fig, ax = plt.subplots()
    ax.plot(pos_keys, pos_vals, 'o-', label='Positive')
    ax.plot(neg_keys, neg_vals, 'o-', label='Negative')
    ax.plot(neu_keys, neu_vals, 'o-', label='Neutral')
    fig.autofmt_xdate()
    plt.legend(shadow=True, fancybox=True)
    plt.title(figure_title)
    plt.show()

    return
Example #5
class akmers :
  def __init__(self) :
    self.mers = OrderedDict()
    self.smers_set = set()
  #*******************************************
  def get_mers(self) :
    return set(self.mers.keys())
  #*******************************************
  def update_smer_set(self) :
    self.smers_set = set(self.mers.keys())
  #*******************************************
  def add_mer(self, mer, count) :
    self.mers[mer] = count
  #*******************************************
  def remove_mer(self, mer) :
    del self.mers[mer]
  #*******************************************
  def has_mers(self) :
    if len(self.mers) > 0 and max(self.mers.values()) > 1 :
      return True
    else :
      return False
  #*******************************************
  def get_count(self, mer) :
    return self.mers[mer]
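A short usage sketch for the k-mer bookkeeping class above (assuming the class and the OrderedDict import are in scope; the k-mers and counts are made up):

ak = akmers()
ak.add_mer('ACGT', 3)
ak.add_mer('CGTA', 1)
assert ak.get_mers() == {'ACGT', 'CGTA'}
assert ak.has_mers()                 # at least one mer has a count above 1
ak.remove_mer('CGTA')
assert ak.get_count('ACGT') == 3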
Example #6
class MemObject(object):
    '''
    The superclass for all objects parsed from memory captures.
    '''
    def __init__(self, offset):
        '''
        All memobj attributes are stored in the fields dictionary

        @offset: the offset in the memory image of this object
        '''
        # Using ordereddict gets us free logical ordering when printing results
        self.fields = OrderedDict()

        self.fields['offset'] = offset
    

    def get_field_keys(self):
        '''
        Since using ordereddict, prettier ordering based on the programmatic 
        order that fields are entered

        @return: an ordered list of fields keys for the memobj
        '''
        return self.fields.keys()


    def __str__(self):
        '''
        @return: string of all fields of a memobj
        '''
        return "".join(["%s: %s\t" % (elem, self.fields[elem]) for elem in self.fields.keys()])
Example #7
def plotInLen(r,freqs,fits,title):
    nc = 3
    minLength=8
    maxLength=25
    fig, axes = plt.subplots(
        nrows=int((maxLength-minLength+1)/2/nc), ncols=nc, figsize=(12, 12)
        )
    index = 0
    for (length,freqs) in freqs.items():
        if length>=minLength and length<=maxLength and length%2==1:
            sortedFreqs = OrderedDict(sorted(freqs.items(),key=lambda t: t[0], reverse=False))
            axes[int((index)/nc),(index)%(nc)].plot(
                list(sortedFreqs.keys()),list(sortedFreqs.values()),'o'
                )
            m, c = fits[length-1]
            axes[int((index)/nc),(index)%(nc)].plot(
                list(sortedFreqs.keys()),
                [(10**(c)*xi**(m)) for xi in sortedFreqs.keys()],
                linewidth = 3,
                label = 'y = '+str("%.2f" %(10**c))+'x^'+str("%.2f" %m)
                )
            axes[int((index)/nc),(index)%(nc)].set_yscale('log')
            axes[int((index)/nc),(index)%(nc)].set_xscale('log')
            #axes[int((index)/nc),(index)%(nc)].legend()
            axes[int((index)/nc),(index)%(nc)].set_title(str(length)+'-mers')
            index+=1
    
    plt.suptitle(title,fontsize=25)
    plt.savefig(r.outputDir+'inlen.png')
Example #8
    def visualization2(self, sp_to_vis=None):
        if sp_to_vis:
            species_ready = list(set(sp_to_vis).intersection(self.all_sp_signatures.keys()))
        else:
            raise Exception('list of driver species must be defined')

        if not species_ready:
            raise Exception('None of the input species is a driver')

        for sp in species_ready:
            # Setting up figure
            plt.figure()
            plt.subplot(313)

            mon_val = OrderedDict()
            signature = self.all_sp_signatures[sp]
            for idx, mon in enumerate(list(set(signature))):
                if mon[0] == 'C':
                    mon_val[self.all_comb[sp][mon] + (-1,)] = idx
                else:
                    mon_val[self.all_comb[sp][mon]] = idx

            mon_rep = [0] * len(signature)
            for i, m in enumerate(signature):
                if m[0] == 'C':
                    mon_rep[i] = mon_val[self.all_comb[sp][m] + (-1,)]
                else:
                    mon_rep[i] = mon_val[self.all_comb[sp][m]]
            # mon_rep = [mon_val[self.all_comb[sp][m]] for m in signature]

            y_pos = numpy.arange(len(mon_val.keys()))
            plt.scatter(self.tspan[1:], mon_rep)
            plt.yticks(y_pos, mon_val.keys())
            plt.ylabel('Monomials', fontsize=16)
            plt.xlabel('Time(s)', fontsize=16)
            plt.xlim(0, self.tspan[-1])
            plt.ylim(0, max(y_pos))

            plt.subplot(312)

            for name in self.model.odes[sp].as_coefficients_dict():
                mon = name
                mon = mon.subs(self.param_values)
                var_to_study = [atom for atom in mon.atoms(sympy.Symbol)]
                arg_f1 = [numpy.maximum(self.mach_eps, self.y[str(va)][1:]) for va in var_to_study]
                f1 = sympy.lambdify(var_to_study, mon)
                mon_values = f1(*arg_f1)
                mon_name = str(name).partition('__')[2]
                plt.plot(self.tspan[1:], mon_values, label=mon_name)
            plt.ylabel('Rate(m/sec)', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.1, 0.85), loc='upper right', ncol=1)

            plt.subplot(311)
            plt.plot(self.tspan[1:], self.y['__s%d' % sp][1:], label=parse_name(self.model.species[sp]))
            plt.ylabel('Molecules', fontsize=16)
            plt.legend(bbox_to_anchor=(-0.15, 0.85), loc='upper right', ncol=1)
            plt.suptitle('Tropicalization' + ' ' + str(self.model.species[sp]))

            # plt.show()
            plt.savefig('s%d' % sp + '.png', bbox_inches='tight', dpi=400)
Example #9
 def getContourLevels(self,dmin,dmax,imt):
     #groupings taken from table on https://en.wikipedia.org/wiki/Peak_ground_acceleration
     if imt == 'pgv':
         #table of minimum dmax and dinc levels
         dmax_dinc = OrderedDict([(1.1,0.1),
                                  (3.4,0.25),
                                  (8.1,0.5),
                                  (16.0,2.0),
                                  (31.0,5.0),
                                  (60.0,10.0),
                                  (116.0,10.0),
                                  (200.0,25.0)])
         keys = np.array(list(dmax_dinc.keys()))
         didx = np.where(keys < dmax)[0].max()
         dinc = dmax_dinc[keys[didx]]
         newdmin = self.round_to(dmin,dinc)
         newdmax = self.round_to(dmax,dinc)
     else:
         dmax_dinc = OrderedDict([(0.014*100,0.1),
                                  (0.039*100,0.5),
                                  (0.092*100,1.0),
                                  (0.18*100,2.5),
                                  (0.34*100,5.0),
                                  (0.65*100,10.0),
                                  (1.24*100,15.0),
                                  (3.0*100,37.5)])
         keys = np.array(list(dmax_dinc.keys()))
         didx = np.where(keys < dmax)[0].max()
         dinc = dmax_dinc[keys[didx]]
         newdmin = self.round_to(dmin,dinc)
         newdmax = self.round_to(dmax,dinc)
     levels = np.arange(newdmin,newdmax+dinc,dinc)
     return levels
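As a worked example of the lookup above (the input value is hypothetical): for imt == 'pgv' and dmax = 12.0, the keys of dmax_dinc below dmax are 1.1, 3.4 and 8.1, so didx is 2, keys[didx] is 8.1 and dinc becomes 0.5; the returned levels are then multiples of 0.5 between the rounded dmin and dmax.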
def test_update():
    '''test the update function'''
    # do we really add nothing if add==False ?
    d = poheader.update({}, test='hello')
    assert len(d) == 0
    # do we add if add==True ?
    d = poheader.update({}, add=True, Test='hello')
    assert len(d) == 1
    assert d['Test'] == 'hello'
    # do we really update ?
    d = poheader.update({'Test': 'hello'}, add=True, Test='World')
    assert len(d) == 1
    assert d['Test'] == 'World'
    # does key rewrite work ?
    d = poheader.update({}, add=True, test_me='hello')
    assert d['Test-Me'] == 'hello'
    # is the order correct ?
    d = OrderedDict()
    d['Project-Id-Version'] = 'abc'
    d['POT-Creation-Date'] = 'now'
    d = poheader.update(d, add=True, Test='hello', Report_Msgid_Bugs_To='*****@*****.**')
    assert d.keys()[0] == "Project-Id-Version"
    assert d.keys()[1] == "Report-Msgid-Bugs-To"
    assert d.keys()[2] == "POT-Creation-Date"
    assert d.keys()[3] == "Test"
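The assertions above read keys by position; under Python 3 the keys() view is not subscriptable, so it has to be materialized first. A minimal illustration of the portable idioms:

from collections import OrderedDict

d = OrderedDict([('Project-Id-Version', 'abc'), ('POT-Creation-Date', 'now')])
assert next(iter(d)) == 'Project-Id-Version'        # first key without building a list
assert list(d.keys())[-1] == 'POT-Creation-Date'    # materialize the view when indexing is needed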
Example #11
def scalar_per_tract_mean_std(tractographies, scalar):
    try:

        results = OrderedDict((
            ('tract file #', []),
            ('per tract distance weighted mean %s' % scalar, []),
            ('per tract distance weighted std %s' % scalar, [])
        ))
        for j, tractography in enumerate(tractographies):
            scalars = tractography.tracts_data()[scalar]
            weighted_scalars = numpy.empty((len(tractography.tracts()), 2))
            for i, t in enumerate(tractography.tracts()):
                tdiff = numpy.sqrt((numpy.diff(t, axis=0) ** 2).sum(-1))
                length = tdiff.sum()
                values = scalars[i][1:].squeeze()
                average = numpy.average(values, weights=tdiff)
                weighted_scalars[i, 0] = average
                weighted_scalars[i, 1] = length
            mean = numpy.average(weighted_scalars[:, 0], weights=weighted_scalars[:, 1])
            std = numpy.average((weighted_scalars[:, 0] - mean) ** 2, weights=weighted_scalars[:, 1])
            results['tract file #'].append(j)
            results['per tract distance weighted mean %s' % scalar].append(float(mean))
            results['per tract distance weighted std %s' % scalar].append(float(std))

        return results
    except KeyError:
        raise ValueError("Tractography does not contain this scalar data")
Example #12
    def plotInLen(self,freqs,fits,title,minLength,maxLength):
        '''plots integrals of the frequencies and their fits
        '''
        nc = 3
        fig, axes = plt.subplots(
            nrows=int((maxLength-minLength+1)/2/nc), ncols=nc, figsize=(12, 12)
            )
        index = 0
        for (length,freqs) in freqs.items():
            if length>=minLength and length<=maxLength and length%2==1:
                sortedFreqs = OrderedDict(sorted(freqs.items(),key=lambda t: t[0], reverse=False))
                axes[int((index)/nc),(index)%(nc)].plot(
                    list(sortedFreqs.keys()),list(sortedFreqs.values()),'o'
                    )
                try:
                    m, c = fits[length-1]
                except KeyError:
                    m, c = (0,0)
                else:
                    axes[int((index)/nc),(index)%(nc)].plot(
                        list(sortedFreqs.keys()),
                        [(10**(c)*xi**(m)) for xi in sortedFreqs.keys()],
                        linewidth = 3,
                        label = 'y = '+str("%.2f" %(10**c))+'x^'+str("%.2f" %m)
                        )
                axes[int((index)/nc),(index)%(nc)].set_yscale('log')
                axes[int((index)/nc),(index)%(nc)].set_xscale('log')
                #axes[int((index)/nc),(index)%(nc)].legend()
                axes[int((index)/nc),(index)%(nc)].set_title(str(length)+'-mers')
                index+=1

        plt.suptitle(title,fontsize=25)
        plt.savefig(os.path.join(
            self.outputDir,'inlen'+str(self.trajectory[2])+'.pdf'))
        plt.savefig(os.path.join(self.outputDir,'inlen'+str(self.trajectory[2])+'.png'))
Example #13
class T(unittest.TestCase):

    def init(self, k1, k2, setup=None):
        print('')
        self.res = OD({k1: 0})
        self.res[k2] = 0
        print('\n%s\n' % ('- ' * 40))
        print ('Test: "%s" vs "%s"' % tuple(self.res.keys()))
        print('\n%s\n' % ('- ' * 40))
        if setup:
            print('setup: %s' % setup)
        return self.res

    def tearDown(self):
        print('')
        for k, dt in self.res.items():
            print('dt was %s for %s' % (dt, k))
        vs = list(self.res.values())
        q = vs[0] / vs[1]
        k1, k2 = self.res.keys()
        if q > 1:
            k1, k2 = k2, k1
            q = vs[1] / vs[0]
        msg = ('=> "%s" is better than "%s" by:' % (k1, k2))
        print(msg)
        print('\n>>>> %.2f <<<<<' % q)
        R.append((q, msg))

    def run_(self, k, expr, setup='pass', number=number):
        # min? read timeit module doc, repeat function:
        print('test expression for "%s":' % k)
        print(expr)
        self.res[k] = min(Timer(expr, setup=setup).repeat(10, number))
Example #14
class TimedObjCache(Cache):
	def __init__(self, expiration=3600, file=None):
		super().__init__(file)
		
		self._data =  OrderedDict()
		self.expiration = expiration
	
	def _prune(self):
		old = []
		for key in list(self._data.keys()):  # copy the keys so entries can be deleted while iterating
			data, added = self._data[key]
			time_since = time() - added
			if time_since >= self.expiration:
				old.append((key, data))
				del self._data[key]
			else:
				break
		return old
	
	def get(self, key):
		self._prune()
		
		if key in self._data:
			return self._data[key][0]
		return None
	
	def store(self, key, data):
		self._data[key] = (data, time())
	
	def data(self):
		return self._data
	
	def __iter__(self):
		return iter(self._data)
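The insertion-ordered expiry idea behind _prune, as a standalone sketch (keys, payloads and the expiration value are made up):

from collections import OrderedDict
from time import time

cache = OrderedDict()                    # key -> (value, timestamp), oldest entries first
cache['a'] = ('payload', time() - 10)    # pretend this entry was stored 10 seconds ago
cache['b'] = ('payload', time())

EXPIRATION = 5
for key in list(cache.keys()):           # copy the keys so deleting while iterating is safe
    value, added = cache[key]
    if time() - added >= EXPIRATION:
        del cache[key]                   # expired
    else:
        break                            # later insertions are newer, so stop early

assert list(cache.keys()) == ['b']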
class MoleculeType:

	def __init__(self, ID):

		self.ID = ID
		self.atoms = OrderedDict()

		self.bonds = []
		self.angles = []
		self.dihedrals = []
		self.impropers = []


	def AddAtom(self, ID, typeID, position):

		if ID in self.atoms.keys():
			raise NameError("ERROR: atom["+ID+"] is already in the molecule.")

		##TODO: check that the typeID exists? naaaa
		atom = Atom(ID,typeID,position)
		self.atoms[ID] = atom
		return atom


	def Print(self):

		print('MoleculeType [' + self.ID + ']:')
		print('Atoms:')
		for aID in self.atoms.keys():
			a = self.atoms[aID]
			a.Print()
		print('Bonds:')
		print(str(self.bonds))
def main():
    args = arguments()
    motif_sites = load_sitecounts(args.input_file, args.cutoff, args.proxyBED)
    if args.output_dir:
        outdir = args.output_dir
    else:
        outdir = ''
    double_sitecounts = OrderedDict()
    for motif in os.listdir(args.dirname):
        motevo_output_file = os.path.join(args.dirname, motif)
        double_sitecounts.setdefault(motif, 0)
        double_sitecounts[motif], dist = \
                count_nonoverlapping_motifs(motif_sites, motevo_output_file, args.cutoff)
        fname = os.path.join(outdir, '%s.dist' % motif)
        with open(fname, 'w') as outf:
            for i, d in enumerate(dist):
                outf.write('\t'.join([
                    str(i),
                    str(d) + '\n',
                ]))

    with open( os.path.join(outdir, os.path.basename(args.input_file)), 'w') as outf:
        outf.write('\t'.join([motif for motif in double_sitecounts.keys()]) + '\n')
        for region in motif_sites.keys():
            outf.write(region + '\t')
            outf.write('\t'.join([str(double_sitecounts[motif][region]) for motif in double_sitecounts.keys()]))            
            outf.write('\n')
    return 0
Example #17
def humans_per_hour(game, **kwargs):
	data = []
	end_date = min(timezone.now(), game.end_date)
	end_td = end_date - game.start_date
	end_hour = end_td.days * 24 + round(end_td.seconds / 3600, 0)
	for dorm, dormName in DORMS:
		sh = game.get_active_players().filter(dorm=dorm).count() # starting humans in this dorm
		d = OrderedDict([(0, sh)])
		kills = Kill.objects.exclude(parent=None).filter(victim__game=game, victim__dorm=dorm).order_by('date')
		for index, kill in enumerate(kills, 1):
			kd = max(kill.date, game.start_date) - game.start_date
			hours = kd.days * 24 + round(kd.seconds / 3600, 1)
			d[min(hours, end_hour)] = sh - index # overwrite
		if end_hour not in d:
			d[end_hour] = d[list(d.keys())[-1]]
		data.append({'name': dormName, 'data': d.items()})
	# add dataset for all dorms
	sh = game.get_active_players().count() - Kill.objects.filter(parent=None, killer__game=game).count() # subtract LZs
	d = OrderedDict([(0, sh)])
	kills = Kill.objects.exclude(parent=None).filter(victim__game=game).order_by('date')
	for index, kill in enumerate(kills, 1):
		kd = max(kill.date, game.start_date) - game.start_date
		hours = kd.days * 24 + round(kd.seconds / 3600, 1)
		d[min(hours, end_hour)] = sh - index # overwrite
	if end_hour not in d:
		d[end_hour] = d[list(d.keys())[-1]]
	data.append({'name': 'ALL', 'data': d.items()})
	return data
Example #18
    def addParameters(self, **kwargs):
        """
        Add special parameters for birthday widget *messages.birthday.\**
        :param kwargs: list of parameters for update
        """
        if 'message' in kwargs:
            content = kwargs['message'].get('content')
            template = kwargs['message'].get('template')
            n = int(kwargs['message'].get('number'))
            orientation = kwargs['message'].get('orientation')
        else:
            content = Settings.get('messages.birthday.content')
            template = ""  # todo define templates
            n = int(Settings.get('messages.birthday.number', 20))
            orientation = Settings.get('messages.birthday.orientation', 20)

        persons = sorted(Person.getPersons(onlyactive=True), key=lambda p: p.birthday)
        p = [(val.birthday - int(datetime.datetime.now().strftime('%j')), idx, persons[idx].lastname, persons[idx].birthdate) for (idx, val) in enumerate(persons)]
        idx = min(filter(lambda x: x[0] >= 0, p))

        person = OrderedDict()
        try:
            for i in range(idx[1] - (n // 2) + 1, idx[1] + (n // 2) + 2):
                _p = persons[i % (len(p))]
                if _p.birthdate.strftime('%d.%m.') not in person.keys():
                    person[_p.birthdate.strftime('%d.%m.')] = []
                person[_p.birthdate.strftime('%d.%m.')].append(_p)
            for _p in persons:
                if _p.birthdate.strftime('%d.%m.') in person.keys() and _p not in person[_p.birthdate.strftime('%d.%m.')]:
                    person[_p.birthdate.strftime('%d.%m.')].append(_p)
        except:
            pass

        kwargs.update({'content': content, 'template': template, 'persons': person, 'daynum': int((datetime.datetime.now()).strftime('%j')), 'orientation': orientation})
        self.params = kwargs
Example #19
def _read_header_tags(s):
    """Read the header tags and return an OrderedDict.

    Each item in `tags` is a dict as returned by _ptu_read_tag().
    The input `s` is a binary-string containing the raw binary data file.
    """
    offset = 16                # initial bytes to skip
    FileTagEnd = "Header_End"  # Last tag of the header (BLOCKEND)
    tag_end_offset = s.find(FileTagEnd.encode()) + len(FileTagEnd)

    tags = OrderedDict()
    tagname, tag, offset = _ptu_read_tag(s, offset)
    tags[tagname] = tag
    while offset < tag_end_offset:
        tagname, tag, offset = _ptu_read_tag(s, offset)
        # In case a `tagname` appears multiple times, we make a list
        # to hold all the tags with the same name
        if tagname in tags.keys():
            if not isinstance(tags[tagname], list):
                tags[tagname]=[tags[tagname]]
            tags[tagname].append(tag)
        else:
            tags[tagname] = tag

    # Make sure we have read the last tag
    assert list(tags.keys())[-1] == FileTagEnd
    return tags, offset
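The duplicate-tag handling above follows a common pattern: the first occurrence stays a scalar and any repeat promotes the entry to a list. A standalone sketch of that pattern (generic names, not tied to the PTU format):

from collections import OrderedDict

def add_tag(tags, name, value):
    # Promote an existing scalar entry to a list when a name repeats.
    if name in tags:
        if not isinstance(tags[name], list):
            tags[name] = [tags[name]]
        tags[name].append(value)
    else:
        tags[name] = value

tags = OrderedDict()
for name, value in [('Dwell', 1), ('Marker', 'a'), ('Marker', 'b')]:
    add_tag(tags, name, value)
assert tags['Marker'] == ['a', 'b']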
class WinWMIMonitors(Monitors):
    def __init__(self):
        if platform.system() != "Windows": raise OSError("at last is  vista")
        self.wmi = wmi.WMI(namespace="root/wmi")
        self.data = OrderedDict()
        for bn in self.wmi.WmiMonitorBrightness():
            self.data[bn.InstanceName] = WinWMIMonitor(self.wmi, bn.InstanceName)

    def __getitem__(self, k):
        if isinstance(k, int):
            return self.data[list(self.data.keys())[k]]
        elif isinstance(k, str):
            return self.data[k]
        else:
            raise KeyError("int or instance name, got {!r}".format(k))


    def __repr__(self):   
        return "<Monitors{!r}>".format(self.data.keys())

    def max(self):
        for k in self.data:
            self.data[k].max()

    def min(self):
        for k in self.data:
            self.data[k].min()

    def percent(self, percents):
        for k in self.data:
            self.data[k].percents(percents)

    def reset(self):
        for k in self.data:
            self.data[k].reset()
def fixgridsearch(hparamfile,generate):

    hparams = OrderedDict()
    dhparams = OrderedDict()

    for hparam in HparamReader(hparamfile):

        if "generate" not in hparam or hparam["generate"] in ["default",""]:
            if hparam["generate"]=="":
                print "*** Warning ***"
                print "    Hyperparameter",hparam["hparam"]
                print "    Please set generation mode : default"

            hparam["generate"] = generate

        dhparams[hparam['hparam']] = hparam.pop("default")

        name = hparam.pop("hparam")
        hparams[name] = hparams.get(name,[]) + list(make_hparams(**hparam))

    values = np.zeros((sum([len(hparam) for hparam in hparams.values()]),len(hparams.keys())))

    j = 0
    for i, hparam in enumerate(hparams.items()):
        # set all default values
        values[j:j+len(hparam[1])] = np.array(list(dhparams.values()))
        # set the value of the current hyper-parameter
        values[j:j+len(hparam[1]),i] = np.array(hparam[1])

        j += len(hparam[1])

    return list(hparams.keys()), values
Example #22
    def __init__(self, parent, serviceManager, recordService):
        self.parent = parent
        self.recordService = recordService
        if serviceManager:
            self.serviceManager = serviceManager
            self.cv_service = serviceManager.get_cv_service()
            self.series_service = serviceManager.get_series_service()
            offsetChoices = OrderedDict((x.description, x.id) for x in
                                        self.cv_service.get_offset_type_cvs())
            self.offSetTypeChoices = [NULL] + list(offsetChoices.keys())

            labChoices = OrderedDict((x.lab_sample_code, x.id) for x in self.cv_service.get_samples())

            self.censorCodeChoices = [NULL] + [x.term for x in self.cv_service.get_censor_code_cvs()]
            self.labSampleChoices = [NULL] + list(labChoices.keys())

            self.qualifierChoices = OrderedDict((x.code + ':' + x.description, x.id)
                                           for x in self.series_service.get_all_qualifiers() if x.code and x.description)
            self.qualifierCodeChoices = [NULL] + list(self.qualifierChoices.keys()) + [NEW]

        else:
            self.censorCodeChoices = [NULL] + ['SampleCensorCode1'] + ['SampleCensorCode2'] + ['SampleCensorCode3']
            self.labSampleChoices = [NULL] + ['SampleLabSample1'] + ['SampleLabSample2'] + ['SampleLabSample3']
            self.offSetTypeChoices = [NULL] + ['SampleOffsetType1'] + ['SampleOffsetType2'] + ['SampleOffsetType3']
            self.qualifierCodeChoices = [NULL] + ['SampleQualifierCode1'] + ['SampleQualifierCode2'] + ['SampleQualifierCode3']
	def rawts2pdseries(self,rawData):
		rawData = OrderedDict([(key,d[key]) for d in rawData for key in d])
		for key in list(rawData.keys()):  # copy the keys: entries are re-keyed while iterating
			newKey = datetime.strptime(key, self.bdStrFormat).replace(tzinfo=self.utc).astimezone(self.pst).replace(tzinfo=None)
			rawData[newKey] = rawData.pop(key)
		pdseries = pd.Series(data=list(rawData.values()), index=list(rawData.keys()))
		return pdseries
Example #24
class Model:
	def __init__( self ):
		self.MinPosition = Vector3( [ sys.float_info.max, sys.float_info.max, sys.float_info.max ] )
		self.MaxPosition = Vector3( [ -sys.float_info.max, -sys.float_info.max, -sys.float_info.max ] )
		self.Positions = []
		self.UVs = []
		self.Normals = []
		self.Meshes = []
		self.Vertices = OrderedDict()
		self.MaterialLib = None
		self.GenerateCollisionData = False

	def Compile( self, filename ):
		if INVERT_Z_COMPONENT:
			for i in range( len( self.Positions ) ):
				self.Positions[ i ].z = - self.Positions[ i ].z
				
		# AABB calculation
		for pos in self.Positions:
			self.MinPosition = self.MinPosition.Min( Vector3( [ pos.x, pos.y, pos.z ] ) )
			self.MaxPosition = self.MaxPosition.Max( Vector3( [ pos.x, pos.y, pos.z ] ) )

		with zipfile.ZipFile( filename, "w", zipfile.ZIP_DEFLATED ) as zf:
			# TODO: Write out the vertex description so it's not hard coded in the game

			# Be very careful re-ordering anything below! Many of these Compile calls have
			# side effects.

			model = StringIO.StringIO()
			WriteVector3( model, self.MinPosition )
			WriteVector3( model, self.MaxPosition )

			# Write out each of the meshes
			meshes = StringIO.StringIO()
			WriteUInt( meshes, len( self.Meshes ) )
			for mesh in self.Meshes:
				mesh.Compile( meshes, self.Vertices, self.MaterialLib.Materials.keys() )
			zf.writestr( "__meshes__", meshes.getvalue() )

			# Write out all the vertex data, interleaved
			WriteUInt( model, len( self.Vertices ) )
			for vtx in self.Vertices.keys():
				pos = self.Positions[ vtx.Position ]
				uv = self.UVs[ vtx.UV ]
				normal = self.Normals[ vtx.Normal ]

				WriteVector3( model, pos )
				WriteVector2( model, uv )
				WriteVector3( model, normal )
			zf.writestr( "__model__", model.getvalue() )

			self.MaterialLib.Compile( zf )

			collision = StringIO.StringIO()
			if self.GenerateCollisionData:
				WriteUInt( collision, len( self.Vertices ) )
				for vtx in self.Vertices.keys():
					pos = self.Positions[ vtx.Position ]
					WriteVector3( collision, pos )
				zf.writestr( "__collision__", collision.getvalue() )
    def get_map(file_name):
        wd, pd, cd = OrderedDict(), OrderedDict(), OrderedDict()
        with open(file_name, 'r') as f:
            for line in f:
                if line.startswith('#') or len(line) <= 1:
                    continue
                wpc = [wpc.split('|') for wpc in line.split()]
                words, poses, cates = zip(*wpc)
                wd.update([(word, None) for word in words])
                pd.update([(pos, None) for pos in poses])
                for cate in cates:
                    if cate in cd:
                        cd[cate] += 1
                    else:
                        cd[cate] = 1
        word2idx.update([(word, i) for i, word in enumerate(wd.keys())])
        pos2idx.update([(pos, i) for i, pos in enumerate(pd.keys())])
        idx2word.update([(i, word) for i, word in enumerate(wd.keys())])
        idx2pos.update([(i, pos) for i, pos in enumerate(pd.keys())])

        cc = [c for c in cd.keys() if cd[c] >= most_common]
        cate2idx.update([(c, i) for i, c in enumerate(cc)])
        idx2cate.update([(i, c) for i, c in enumerate(cc)])

        insert2map(woov, word2idx, idx2word)
        insert2map(poov, pos2idx, idx2pos)
        insert2map(coov, cate2idx, idx2cate)
Example #26
class Metric(object):

    """Metrics used by CVSS."""

    def __init__(self, name, short_name, metric_values, index=None):
        assert len(metric_values), 'At least one MetricValue needed.'
        self.__name = name
        self.__short_name = short_name
        # Create the key-value pairs. Use the MetricValue as the key.
        vals = []
        for x in metric_values:
            m = MetricValue(*x)
            vals.append((m.value, m))
        self.__values = OrderedDict(vals)
        # Use the first key available.
        if index is None:
            self.index = vals[0][0]
        else:
            assert index in self.__values.keys(), 'Not a valid key'
            self.index = index

    def __repr__(self):
        return ("{0}('{1}','{2}',{3},'{4}')".format(self.__class__.__name__,
                                                    self.name,
                                                    self.short_name,
                                                    self.values,
                                                    self.index))

    def __str__(self):
        """Use selected MetricValue as a string."""
        return str(self.selected)

    def __float__(self):
        """Use selected MetricValue as a float."""
        return float(self.selected)

    @property
    def name(self):
        return self.__name

    @property
    def short_name(self):
        return self.__short_name

    @property
    def values(self):
        return list(self.__values.values())

    @property
    def index(self):
        return self.__index

    @index.setter
    def index(self, index):
        assert index in self.__values.keys(), "Not a valid key"
        self.__index = index

    @property
    def selected(self):
        return self.__values[self.__index]
Example #27
def get_composition(atoms, basis=None):
    """ Acquire the chemical composition of an atoms object

    Returns: a dictionary mapping each chemical symbol to its fractional
    composition, in order of first appearance.
    """

    symbols = atoms.get_chemical_symbols()
    count = len(symbols)

    # Collect the symbol and count of each atom type
    S = OrderedDict()

    for symbol in symbols:

        if symbol in S.keys():
            S[symbol] += 1.0
        else:
            S[symbol] = 1.0

    # Convert to composition
    for key, val in S.items():
        S[key] = val / count

    if basis:
        if basis in S.keys():
            return S[basis]
        else:
            return 0.0
    else:
        return S
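The same counting-and-normalising idea in isolation, without an ASE-style atoms object (the symbol list is made up):

from collections import OrderedDict

symbols = ['Pt', 'Pt', 'Rh', 'Pt']       # e.g. what atoms.get_chemical_symbols() would return
counts = OrderedDict()
for symbol in symbols:
    counts[symbol] = counts.get(symbol, 0) + 1

composition = OrderedDict((s, n / len(symbols)) for s, n in counts.items())
assert composition['Pt'] == 0.75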
Example #28
def join(keys, tables):
    """Merge a list of `Table` objects using `keys` to group rows"""

    # Make new (merged) Table fields
    fields = OrderedDict()
    for table in tables:
        fields.update(table.fields)
    # TODO: may raise an error if a same field is different in some tables

    # Check if all keys are inside merged Table's fields
    fields_keys = set(fields.keys())
    for key in keys:
        if key not in fields_keys:
            raise ValueError('Invalid key: "{}"'.format(key))

    # Group rows by key, without missing ordering
    none_fields = lambda: OrderedDict((field, None) for field in fields)
    data = OrderedDict()
    for table in tables:
        for row in table:
            row_key = tuple([getattr(row, key) for key in keys])
            if row_key not in data:
                data[row_key] = none_fields()
            data[row_key].update(row._asdict())

    merged = Table(fields=fields)
    merged.extend(data.values())
    return merged
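The grouping step at the heart of join, shown standalone with plain dicts instead of Table objects (the row data is made up):

from collections import OrderedDict

rows_a = [{'id': 1, 'name': 'ada'}, {'id': 2, 'name': 'bob'}]
rows_b = [{'id': 1, 'age': 36}]

data = OrderedDict()
for row in rows_a + rows_b:
    row_key = (row['id'],)                 # group rows that share the key columns
    merged = data.setdefault(row_key, {'id': None, 'name': None, 'age': None})
    merged.update(row)

assert data[(1,)] == {'id': 1, 'name': 'ada', 'age': 36}
assert data[(2,)] == {'id': 2, 'name': 'bob', 'age': None}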
def createResultDict(jobRangeStart, jobRangeEnd, workerData=False):
    masterDict = OrderedDict()

    for i in range(jobRangeStart, jobRangeEnd + 1):
        inFLTitle = "photo_album_" + str(i)
        inFL = "../results/photo_album_" + str(i) + ".results"
        with open(inFL, "r") as inp:
            inFLList = [line.replace('"', '') for line in inp]

        header = inFLList[0].split("\t")
        resultList = [line.split("\t") for line in inFLList[1:]]

        resultDict = OrderedDict()
        for row in range(0, len(resultList)):
            for col in range(0, len(header)):
                resultDict[header[col]] = resultDict.get(header[col], []) + [resultList[row][col]]

        keysOfInterest = list(filter(lambda x: re.search("Answer", x), resultDict.keys()))
        if workerData:
            keysOfInterest += list(filter(lambda x: re.search("workerid", x), resultDict.keys()))

        newDict = OrderedDict()
        for key in keysOfInterest:
            newDict[key] = resultDict[key]

        masterDict[inFLTitle] = newDict

    return masterDict
Example #30
class Cost():

    def __init__(self, cost, params, constants=None):
        self.cost = cost
        self.grads = OrderedDict()
        self.computed_cost = False

        self.params = OrderedDict()
        for p in params:
            self.params[p] = True

        self.constants = OrderedDict()
        constants = [] if constants is None else constants
        for c in constants:
            self.constants[c] = True

    def compute_gradients(self, lr, multipliers=None):
        multipliers = OrderedDict() if multipliers is None else multipliers
        grads = T.grad(self.cost, list(self.params.keys()),
                       consider_constant=list(self.constants.keys()),
                       disconnected_inputs='ignore')
        for param, gparam in zip(self.params.keys(), grads):
            param_lr = multipliers.get(param.name, 1.0) * lr
            self.grads[param] = param_lr * gparam
        self.computed_cost = True

    def update_gradient(self, param, new_grad):
        assert self.computed_cost
        assert param in self.grads
        self.grads[param] = new_grad
Example #31
class MultipleFileExplorer(QtGui.QTabWidget):
    """
    Class for multiple location file explorer, capability to add GlobusFileView Tabs
    """

    sigLoginSuccess = QtCore.Signal(bool)
    sigLoginRequest = QtCore.Signal(QtCore.Signal, bool)
    sigProgJob = QtCore.Signal(str, object, list, dict, object)
    sigPulsJob = QtCore.Signal(str, object, list, dict, object)
    sigSFTPJob = QtCore.Signal(str, object, list, dict, object)
    sigOpen = QtCore.Signal(list)
    sigFolderOpen = QtCore.Signal(list)
    sigPreview = QtCore.Signal(object)

    def __init__(self, parent=None):
        super(MultipleFileExplorer, self).__init__(parent)
        self.explorers = OrderedDict()

        self.tab = TabBarPlus()
        self.setTabBar(self.tab)
        self.setTabsClosable(True)

        self.explorers['Local'] = FileExplorer(LocalFileView(self), self)
        self.addFileExplorer('Local', self.explorers['Local'], closable=False)

        self.jobtab = JobTable(self)
        # Do not understand why I need to add it and remove it so that it's not added as a separate widget
        self.addTab(self.jobtab, 'Jobs')
        self.removeTab(1)

        self.sigProgJob.connect(self.jobtab.addProgJob)
        self.sigPulsJob.connect(self.jobtab.addPulseJob)
        self.sigSFTPJob.connect(self.jobtab.addSFTPJob)

        self.tab.plusClicked.connect(self.onPlusClicked)
        self.tabCloseRequested.connect(self.removeTab)

        self.newtabmenu = QtGui.QMenu(None)
        addspot = QtGui.QAction('SPOT', self.newtabmenu)
        addcori = QtGui.QAction('Cori', self.newtabmenu)
        addedison = QtGui.QAction('Edison', self.newtabmenu)
        addbragg = QtGui.QAction('Bragg', self.newtabmenu)
        addsftp = QtGui.QAction('SFTP Connection', self.newtabmenu)
        showjobtab = QtGui.QAction('Jobs', self.newtabmenu)
        self.standard_actions = OrderedDict([
            ('SPOT', addspot),
            ('Cori', addcori),
            ('Edison', addedison),
            ('Bragg', addbragg),
            ('SFTP', addsftp)
        ])
        self.newtabmenu.addActions(list(self.standard_actions.values()))
        self.newtabmenu.addAction(showjobtab)
        addspot.triggered.connect(self.addSPOTTab)
        addedison.triggered.connect(lambda: self.addHPCTab('Edison'))
        addcori.triggered.connect(lambda: self.addHPCTab('Cori'))
        addbragg.triggered.connect(lambda: self.addHPCTab('Bragg'))
        addsftp.triggered.connect(self.addSFTPTab)
        showjobtab.triggered.connect(lambda: self.addTab(self.jobtab, 'Jobs'))

    def enableActions(self):
        for name, action in self.standard_actions.items():
            if name in self.explorers:
                action.setEnabled(False)
            else:
                action.setEnabled(True)

    def removeTab(self, p_int):
        if self.tabText(p_int) != 'Jobs':
            name = list(self.explorers.keys())[p_int]
            explorer = self.explorers.pop(name)
            cmanager.logout(explorer.file_view.client)
            self.widget(p_int).deleteLater()
            self.enableActions()
        super(MultipleFileExplorer, self).removeTab(p_int)

    def removeTabs(self):
        for i in range(1, self.count()):
            self.removeTab(1)

    def onPlusClicked(self):
        self.newtabmenu.popup(QtGui.QCursor.pos())

    def addFileExplorer(self, name, file_explorer, closable=True):
        self.explorers[name] = file_explorer
        file_explorer.file_view.sigItemPreview.connect(self.itemSelected)
        self.wireExplorerSignals(file_explorer)
        idx = len(self.explorers) - 1
        tab = self.insertTab(idx, file_explorer, name)
        if closable is False:
            try:
                self.tabBar().tabButton(tab,
                                        QtGui.QTabBar.RightSide).resize(0, 0)
                self.tabBar().tabButton(tab, QtGui.QTabBar.RightSide).hide()
            except AttributeError:
                self.tabBar().tabButton(tab,
                                        QtGui.QTabBar.LeftSide).resize(0, 0)
                self.tabBar().tabButton(tab, QtGui.QTabBar.LeftSide).hide()
        self.setCurrentWidget(file_explorer)

    def wireExplorerSignals(self, explorer):
        explorer.file_view.sigOpen.connect(self.handleOpenActions)
        explorer.file_view.sigOpenFolder.connect(self.handleOpenFolderActions)
        try:
            explorer.file_view.sigDownload.connect(self.handleDownloadActions)
        except AttributeError:
            pass
        try:
            explorer.file_view.sigDelete.connect(self.handleDeleteActions)
        except AttributeError:
            pass
        try:
            explorer.file_view.sigTransfer.connect(self.handleTransferActions)
        except AttributeError:
            pass

    def itemSelected(self, item):
        msg.clearMessage()  #
        self.sigPreview.emit(item)

    def addHPCTab(self, system):
        # # NERSC tabs based on NEWT API
        # add_nersc_explorer = lambda client: self.addFileExplorer(system.capitalize(),
        # FileExplorer(NERSCFileView(client, system, self)))
        # login_callback = lambda client: self.loginSuccess(client, add_explorer=add_nersc_explorer)
        # self.sigLoginRequest.emit(partial(cmanager.login, login_callback, cmanager.spot_client.login), False)
        # add_sftp_explorer = lambda client: self.addFileExplorer(client.host.split('.')[0],
        #                                                         FileExplorer(SFTPTreeWidget(client, self)))

        # NERSC tabs based on SFTP
        add_sftp_explorer = lambda client: self.addFileExplorer(
            system, FileExplorer(SFTPFileView(client, self)))
        add_sftp_callback = lambda client: self.loginSuccess(
            client, add_explorer=add_sftp_explorer)
        login_callback = lambda client: cmanager.add_sftp_client(
            system, client, add_sftp_callback)
        sftp_client = partial(cmanager.sftp_client,
                              cmanager.HPC_SYSTEM_ADDRESSES[system])
        self.sigLoginRequest.emit(
            partial(cmanager.login, login_callback, sftp_client), False)

    def addSPOTTab(self):
        add_spot_explorer = lambda client: self.addFileExplorer(
            'SPOT', SpotDatasetExplorer(client, self))
        login_callback = lambda client: self.loginSuccess(
            client, add_explorer=add_spot_explorer)
        self.sigLoginRequest.emit(
            partial(cmanager.login, login_callback,
                    cmanager.spot_client.login), False)

    #TODO add globus transfer capabilities to SFTP connected machines if they are globus endpoints
    # This is being replaced by SFTP tabs
    # def addGlobusTab(self, endpoint):
    # add_globus_explorer = lambda client: self.addFileExplorer(endpoint.split('#')[-1],
    #                                                              FileExplorer(GlobusFileView(client, client, self)))
    #     add_endpoint_callback = lambda client: self.loginSuccess(client,
    #                                                              add_explorer=add_globus_explorer)
    #     login_callback = lambda client: cmanager.add_globus_client(endpoint.split('#')[-1],
    #                                                                client,
    #                                                                add_endpoint_callback)
    #     globus_client = cmanager.globus_client()
    #     self.sigLoginRequest.emit(partial(cmanager.login, login_callback, globus_client.login), False)

    def addSFTPTab(self):
        add_sftp_explorer = lambda client: self.addFileExplorer(
            client.host.split('.')[0], FileExplorer(SFTPFileView(client, self))
        )
        add_sftp_callback = lambda client: self.loginSuccess(
            client, add_explorer=add_sftp_explorer)
        login_callback = lambda client: cmanager.add_sftp_client(
            client.host, client, add_sftp_callback)
        sftp_client = cmanager.sftp_client
        self.sigLoginRequest.emit(
            partial(cmanager.login, login_callback, sftp_client), True)

    def loginSuccess(self, client, add_explorer=None):
        if not client:
            self.sigLoginSuccess.emit(False)
        else:
            add_explorer(client)
            self.enableActions()
            self.sigLoginSuccess.emit(True)

    def getSelectedFilePaths(self):
        return self.currentWidget().getSelectedFilePaths()

    def getCurrentPath(self):
        return self.currentWidget().path

    def getPath(self, tab_name):
        return self.explorers[tab_name].path

    def handleOpenActions(self, paths):
        if len(paths) > 0:
            self.sigOpen.emit(paths)

    def handleOpenFolderActions(self, paths):
        if len(paths) > 0:
            self.sigFolderOpen.emit(paths)

    def handleDeleteActions(self, paths):
        r = QtGui.QMessageBox.warning(
            self, 'Delete file',
            'Are you sure you want to delete\n{}?'.format(',\n'.join(paths)),
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
        if r == QtGui.QMessageBox.Yes:
            self.currentWidget().file_view.deleteSelection()

    def handleUploadAction(self, desc, method, args, kwargs):
        self.sigProgJob.emit(desc, method, args, kwargs)
        self.addTab(self.jobtab, 'Jobs')

    def handleDownloadActions(self, name, desc, method, args, kwargs, fslot):
        if 'save_path' not in kwargs and 'localpath' not in kwargs:
            fileDialog = QtGui.QFileDialog(self, 'Save as',
                                           os.path.expanduser('~'))
            fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
            fileDialog.selectFile(name)
            if fileDialog.exec_():
                save_path = str(fileDialog.selectedFiles()[0])
                if isinstance(self.currentWidget().file_view, SFTPFileView):
                    kwargs['localpath'] = save_path
                else:
                    kwargs['save_path'] = save_path
        if isinstance(self.currentWidget().file_view, SFTPFileView):
            self.sigSFTPJob.emit(desc, method, args, kwargs, fslot)
        else:
            self.sigProgJob.emit(desc, method, args, kwargs, fslot)
        self.addTab(self.jobtab, 'Jobs')

    def handleTransferActions(self, paths, desc, method, args, kwargs):
        #TODO Need to implement this
        if isinstance(self.currentWidget().file_view, SpotDatasetView):
            self.sigPulsJob.emit(desc, method, args, kwargs)
        else:
            self.sigProgJob.emit(desc, method, args, kwargs)
        self.addTab(self.jobtab, 'Jobs')
Example #32
class MLFits(object):
    """
    ######## Fitter module ########

    This class contains the method to fit the event with the selected attributes.

    **WARNING**: All fits (and so results) are made using data in flux.

    Attributes :

        event : the event object on which you perform the fit. More details on the event module.

        model : The microlensing model you want to fit. Has to be an object defined in the
                microlmodels module.
                More details on the microlmodels module.

        method : The fitting method you want to use for the fit.

        guess : The guess you can give to the fit or the guess returned by the initial_guess function.

        fit_results : the fit parameters returned by the LM and DE methods.

        fit_covariance : the fit parameters covariance matrix returned by the LM and DE methods.

        fit_time : the time needed to fit.

        MCMC_chains : the MCMC chains returned by the MCMC method

        MCMC_probabilities : the objective function computed for each chain of the MCMC method

        fluxes_MCMC_method : a string describing how you want to estimate the model fluxes for the MCMC method.

        outputs : the standard pyLIMA outputs. More details in the microloutputs module.

    :param object event: the event object on which you perform the fit. More details on the
                         event module.


    """
    def __init__(self, event):
        """The fit class has to be intialized with an event object."""

        self.event = event
        self.model = microlmodels.ModelPSPL(event)
        self.method = 'None'
        self.guess = []
        self.outputs = []
        self.fit_results = []
        self.fit_covariance = []
        self.fit_time = []
        self.DE_population = []
        self.binary_regime = None
        self.MCMC_chains = []
        self.MCMC_probabilities = []
        self.fluxes_MCMC_method = ''
        self.pool = None

    def mlfit(self,
              model,
              method,
              DE_population_size=10,
              flux_estimation_MCMC='MCMC',
              fix_parameters_dictionnary=None,
              grid_resolution=10,
              computational_pool=None,
              binary_regime=None):
        """This function realize the requested microlensing fit, and set the according results
        attributes.

        :param object model: the model object requested. More details on the microlmodels module.

        :param string method: The fitting method you want to use. Has to be a string  in :

                                 'LM' --> Levenberg-Marquardt algorithm. Based on the
                                 scipy.optimize.leastsq routine.
                                          **WARNING** : the parameter maxfev (number of maximum
                                          iterations) is set to 50000
                                          the parameter ftol (relative precision on the chi^2) is
                                          set to 0.00001
                                          your fit may not converge because of these limits.
                                          The starting points of this method are found using the
                                          initial_guess method.
                                          Obviously, this can fail. In this case, switch to
                                          method 'DE'.

                                 'DE' --> Differential evolution algorithm. Based on the
                                 scipy.optimize.differential_evolution.
                                          Look Storn & Price (1997) : "Differential Evolution – A
                                          Simple and Efficient Heuristic for global Optimization
                                          over Continuous Spaces"
                                          Because this method is heuristic, it is not guaranteed
                                          that a satisfying solution is found. Just relaunch :)
                                          The result is then used as a starting point for the 'LM'
                                          method.


                                 'MCMC' --> Monte-Carlo Markov Chain algorithm. Based on the
                                 emcee python package :
                                          " emcee: The MCMC Hammer" (Foreman-Mackey et al. 2013).
                                          The initial population is computed around the best
                                          solution returned by
                                          the 'DE' method.


        :param int DE_population_size:  The population factor desired for the DE method. Default is 10.

        :param string flux_estimation_MCMC: The desired method to estimate the fluxes (f_source and g) of the
                                             telescopes. 'MCMC' (the default) estimates them through an MCMC method;
                                             any other value estimates them with a 1D polynomial fit via np.polyfit.

        Note that a sanity check is done post-fit to assess the fit quality with the check_fit
        function.
        """
        print('')
        print('Start fit on ' + self.event.name + ', with model ' +
              model.model_type + ' and method ' + method)
        self.event.check_event()

        self.model = model
        self.method = method
        self.fluxes_MCMC_method = flux_estimation_MCMC
        self.DE_population_size = DE_population_size
        self.model.define_model_parameters()
        if computational_pool:
            from mpi4py import MPI
            import dill

            MPI.pickle.dumps = dill.dumps
            MPI.pickle.loads = dill.loads
            self.pool = computational_pool
        self.binary_regime = binary_regime

        if self.method == 'LM':
            number_of_data = self.event.total_number_of_data_points()
            if number_of_data <= (len(self.model.model_dictionnary)):

                print("You do not have enough data points to use this method (LM), please switch to other methods." \
                      " Given the requested total model " + str(self.model.model_dictionnary.keys()) + \
                      " you need at least " + str(
                    len(self.model.model_dictionnary)) + ' data points to use the method LM!')
                return

            else:

                self.fit_results, self.fit_covariance, self.fit_time = self.lmarquardt(
                )

        if self.method == 'TRF':
            self.fit_results, self.fit_covariance, self.fit_time = self.trust_region_reflective(
            )
        if self.method == 'DE':
            self.fit_results, self.fit_covariance, self.fit_time = self.differential_evolution(
            )

        if self.method == 'MCMC':
            self.MCMC_chains = self.MCMC()

        if self.method == 'GRIDS':
            self.fix_parameters_dictionnary = OrderedDict(
                sorted(fix_parameters_dictionnary.items(), key=lambda x: x[1]))
            self.grid_resolution = grid_resolution
            self.grids()

        fit_quality_flag = 'Good Fit'

        if self.method != 'MCMC':
            fit_quality_flag = self.check_fit()

        if fit_quality_flag == 'Bad Fit':

            if self.method == 'LM':

                print('We have to change method, this fit was unsuccessful. We decided to switch '
                      'method to "DE"')

                # self.method = 'DE'
                # self.mlfit(self.model, self.method, self.fluxes_MCMC_method)

            else:

                print('Unfortunately, this is too hard for pyLIMA :(')

    def check_fit(self):
        """Check if the fit results and covariance make sens.

         0.0 terms or a negative term in the diagonal covariance matrix indicate the fit is not
         reliable.

         A negative source flux is also counted as a bad fit.

         A negative rho or rho > 0.1 is also considered a bad fit.

         :return: a flag indicating a good or bad fit ('Good Fit' or 'Bad Fit')
         :rtype: string
        """

        flag_quality = 'Good Fit'
        negative_covariance_diagonal = np.diag(self.fit_covariance) < 0
        number_of_data = self.event.total_number_of_data_points()
        if number_of_data >= (len(self.model.model_dictionnary) +
                              2 * len(self.event.telescopes)):

            if (0.0 in self.fit_covariance):
                print(
                    'Your fit is probably wrong. Cause ==> bad covariance matrix')
                flag_quality = 'Bad Fit'
                return flag_quality

        if (True in negative_covariance_diagonal) | \
                (np.isnan(self.fit_covariance).any()) | (np.isinf(self.fit_covariance).any()):
            print('Your fit is probably wrong. Cause ==> bad covariance matrix')
            flag_quality = 'Bad Fit'
            return flag_quality

        for i in self.event.telescopes:

            if self.fit_results[self.model.model_dictionnary['fs_' +
                                                             i.name]] < 0:
                print('Your fit is probably wrong. Cause ==> negative source flux for telescope ' +
                      i.name)
                flag_quality = 'Bad Fit'
                return flag_quality

        if 'rho' in list(self.model.model_dictionnary.keys()):

            if (self.fit_results[self.model.model_dictionnary['rho']] > 0.1) | \
                    (self.fit_results[self.model.model_dictionnary['rho']] < 0.0):
                print('Your fit is probably wrong. Cause ==> bad rho')
                flag_quality = 'Bad Fit'
                return flag_quality

        return flag_quality
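
    # Illustrative sketch (not part of the original class) of the covariance diagnostic
    # used in check_fit above, assuming `covariance` is the square numpy covariance matrix:
    #
    #     bad_covariance = ((np.diag(covariance) < 0).any() or
    #                       np.isnan(covariance).any() or
    #                       np.isinf(covariance).any() or
    #                       (0.0 in covariance))  # the 0.0 check only applies when enough data points exist
    #
    # Any of these conditions flags the fit as 'Bad Fit'.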

    def initial_guess(self):
        """Try to estimate the microlensing parameters. Only use for PSPL and FSPL
           models. More details on microlguess module.

           :return guess_parameters: a list containing parameters guess related to the model.
           :rtype: list
        """

        if len(self.model.parameters_guess) == 0:

            # Estimate  the Paczynski parameters

            if self.model.model_type == 'PSPL':
                guess_paczynski_parameters, f_source = microlguess.initial_guess_PSPL(
                    self.event)

            if self.model.model_type == 'FSPL':
                guess_paczynski_parameters, f_source = microlguess.initial_guess_FSPL(
                    self.event)

            if self.model.model_type == 'DSPL':
                guess_paczynski_parameters, f_source = microlguess.initial_guess_DSPL(
                    self.event)

            # Estimate  the telescopes fluxes (flux_source + g_blending) parameters

            telescopes_fluxes = self.find_fluxes(guess_paczynski_parameters,
                                                 self.model)

            # The survey fluxes are already known from microlguess
            telescopes_fluxes[0] = f_source
            telescopes_fluxes[1] = 0.0

            if 'piEN' in self.model.model_dictionnary.keys():
                guess_paczynski_parameters = guess_paczynski_parameters + [
                    0.0, 0.0
                ]

            if 'XiEN' in self.model.model_dictionnary.keys():
                guess_paczynski_parameters = guess_paczynski_parameters + [
                    0, 0
                ]

            if 'dsdt' in self.model.model_dictionnary.keys():
                guess_paczynski_parameters = guess_paczynski_parameters + [
                    0, 0
                ]

            if 'spot_size' in self.model.model_dictionnary.keys():
                guess_paczynski_parameters = guess_paczynski_parameters + [0]

        else:

            guess_paczynski_parameters = list(self.model.parameters_guess)

            telescopes_fluxes = self.find_fluxes(guess_paczynski_parameters,
                                                 self.model)

        guess_paczynski_parameters += telescopes_fluxes

        print(sys._getframe().f_code.co_name,
              ' : Initial parameters guess SUCCESS')
        return guess_paczynski_parameters

    def MCMC(self):
        """ The MCMC method. Construct starting points of the chains around
            the best solution found by the 'DE' method.
            The objective function is :func:`chichi_MCMC`. Telescope flux (fs and g), can be optimized thanks to MCMC if
            flux_estimation_MCMC is 'MCMC', either they are derived through np.polyfit.

            Based on the emcee python package :
            " emcee: The MCMC Hammer" (Foreman-Mackey et al. 2013).
            Have a look here : http://dan.iel.fm/emcee/current/

            :return: a tuple containing (MCMC_chains, MCMC_probabilities)
            :rtype: tuple

            **WARNING** :
                   nwalkers is set to 100
                   nlinks is set to 300
                   5*nwalkers*nlinks MCMC steps in total
        """

        # start = python_time.time()

        if len(self.model.parameters_guess) == 0:

            differential_evolution_estimation = self.differential_evolution(
            )[0]
            self.DE_population_size = 10
            self.guess = differential_evolution_estimation

        else:

            self.guess = list(self.model.parameters_guess)
            self.guess += self.find_fluxes(self.guess, self.model)

        # Best solution

        limit_parameters = len(self.model.parameters_boundaries)
        best_solution = self.guess[:limit_parameters]

        nwalkers = 8 * len(best_solution)
        nlinks = 1000

        # Initialize the population of MCMC
        population = []

        count_walkers = 0

        while count_walkers < nwalkers:

            # Construct an individual of the population around the best solution.
            individual = []
            for parameter_key in list(
                    self.model.model_dictionnary.keys())[:limit_parameters]:

                parameter_trial = microlguess.MCMC_parameters_initialization(
                    parameter_key, self.model.model_dictionnary, best_solution)

                if parameter_trial:

                    for parameter in parameter_trial:
                        individual.append(parameter)

            if self.fluxes_MCMC_method == 'MCMC':
                fluxes = self.find_fluxes(individual, self.model)
                individual += fluxes

            chichi = self.chichi_MCMC(individual)

            if chichi != -np.inf:
                # np.array(individual)
                # print count_walkers

                population.append(np.array(individual))
                count_walkers += 1

        print('pre MCMC done')

        number_of_parameters = len(individual)

        sampler = emcee.EnsembleSampler(nwalkers,
                                        number_of_parameters,
                                        self.chichi_MCMC,
                                        a=2.0,
                                        pool=self.pool)

        # First estimation using population as a starting points.

        final_positions, final_probabilities, state = sampler.run_mcmc(
            population, nlinks)

        print('MCMC preburn done')

        sampler.reset()
        MCMC_chains = None

        # Final estimation using the previous output.
        for positions, probabilities, states in sampler.sample(
                final_positions, iterations=nlinks, storechain=True):
            chains = np.c_[positions, probabilities]
            if MCMC_chains is not None:

                MCMC_chains = np.r_[MCMC_chains, chains]
            else:

                MCMC_chains = chains

        print(sys._getframe().f_code.co_name, ' : MCMC fit SUCCESS')
        return MCMC_chains
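
    # Hedged usage sketch of the sampler construction above (assumes the emcee 2.x API
    # used in this code, where run_mcmc returns positions, log-probabilities and a state):
    #
    #     sampler = emcee.EnsembleSampler(nwalkers, number_of_parameters,
    #                                     self.chichi_MCMC, a=2.0, pool=self.pool)
    #     positions, probabilities, state = sampler.run_mcmc(population, nlinks)
    #
    # chichi_MCMC below returns -chi^2/2, i.e. the log-likelihood up to a constant for
    # Gaussian errors, which is the objective emcee expects.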

    def chichi_MCMC(self, fit_process_parameters):
        """Return the chi^2 for the MCMC method. There is some priors here.

        :param list fit_process_parameters: the model parameters ingested by the correpsonding
                                            fitting routine.

        :returns: here, the return is -chi^2/2 (likelihood)

        :rtype: float
        """

        chichi = 0

        pyLIMA_parameters = self.model.compute_pyLIMA_parameters(
            fit_process_parameters)

        for index, parameter in enumerate(pyLIMA_parameters):
            if np.abs(parameter) > 10**100:
                return -np.inf

        for telescope in self.event.telescopes:
            # Find the residuals of telescope observation regarding the parameters and model
            residus = self.model_residuals(telescope, pyLIMA_parameters)

            chichi += (residus**2).sum()

        return -chichi / 2

    def differential_evolution(self):
        """  The DE method. Differential evolution algorithm. The objective function is
        :func:`chichi_differential_evolution`. The flux parameters are estimated through np.polyfit.
         Based on the scipy.optimize.differential_evolution.
         Look Storn & Price (1997) :
         "Differential Evolution – A Simple and Efficient Heuristic for
         global Optimization over Continuous Spaces"

         :return: a tuple containing (fit_results, fit_covariance, computation_time)
         :rtype: tuple

         **WARNING** :
                   tol (relative standard deviation of the objective function) is set to 10^-4

                   popsize (the total number of individuals is :
                   popsize*number_of_paczynski_parameters)
                   is set to DE_population_size

                   mutation is set to (0.1, 1.5)

                   recombination is set to 0.7

                   These parameters can avoid the fit to properly converge (expected to be rare :)).
                   Just relaunch should be fine.
        """

        starting_time = python_time.time()
        differential_evolution_estimation = scipy.optimize.differential_evolution(
            self.chichi_differential_evolution,
            bounds=self.model.parameters_boundaries,
            mutation=(0.5, 1.0),
            popsize=int(self.DE_population_size),
            maxiter=5000,
            tol=0.0,
            atol=0.1,
            strategy='rand1bin',
            recombination=0.7,
            polish=True,
            init='latinhypercube',
            disp=True)

        # paczynski_parameters are all parameters to compute the model, excepted the telescopes fluxes.
        paczynski_parameters = differential_evolution_estimation['x'].tolist()

        print('DE converge to objective function : f(x) = ',
              str(differential_evolution_estimation['fun']))
        print('DE converge to parameters : = ',
              differential_evolution_estimation['x'].astype(str))

        # Construct the guess for the LM method. In principle, guess and outputs of the LM
        # method should be very close.

        number_of_data = self.event.total_number_of_data_points()
        if number_of_data <= (len(self.model.model_dictionnary)):

            print("You do not have enough data points to use LM method to estimate the covariance matrix." \
                  "The covariance matrix is set to 0.0. please switch to MCMC if you need errors estimation.")

            fit_results = paczynski_parameters + self.find_fluxes(paczynski_parameters, self.model) + \
                          [differential_evolution_estimation['fun']]
            fit_covariance = np.zeros(
                (len(paczynski_parameters) + 2 * len(self.event.telescopes),
                 len(paczynski_parameters) + 2 * len(self.event.telescopes)))

        else:

            self.guess = paczynski_parameters + self.find_fluxes(
                paczynski_parameters, self.model)

            fit_results, fit_covariance, fit_time = self.lmarquardt()

        computation_time = python_time.time() - starting_time

        print(sys._getframe().f_code.co_name,
              ' : Differential evolution fit SUCCESS')
        return fit_results, fit_covariance, computation_time
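
    # Hedged usage sketch of the optimizer call above (only keywords already used in
    # differential_evolution() appear here; `objective` and `boundaries` are placeholders
    # for chichi_differential_evolution and self.model.parameters_boundaries):
    #
    #     result = scipy.optimize.differential_evolution(
    #         objective, bounds=boundaries, mutation=(0.5, 1.0), popsize=10,
    #         tol=0.0, atol=0.1, strategy='rand1bin', recombination=0.7, polish=True)
    #     best_parameters = result['x']
    #     best_chi2 = result['fun']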

    def chichi_differential_evolution(self, fit_process_parameters):
        """Return the chi^2 for the DE method.

        :param list fit_process_parameters: the model parameters ingested by the corresponding
        fitting routine.

        :returns: the chi^2

        :rtype: float
        """
        pyLIMA_parameters = self.model.compute_pyLIMA_parameters(
            fit_process_parameters)

        chichi = 0.0

        for telescope in self.event.telescopes:
            # Find the residuals of telescope observation regarding the parameters and model
            residus = self.model_residuals(telescope, pyLIMA_parameters)

            chichi += (residus**2).sum()

        self.DE_population.append(fit_process_parameters.tolist() + [chichi])

        return chichi

    def lmarquardt(self):
        """The LM method. This is based on the Levenberg-Marquardt algorithm:

           "A Method for the Solution of Certain Problems in Least Squares"
           Levenberg, K. Quart. Appl. Math. 2, 1944, p. 164-168
           "An Algorithm for Least-Squares Estimation of Nonlinear Parameters"
           Marquardt, D. SIAM J. Appl. Math. 11, 1963, p. 431-441

           Based on scipy.optimize.leastsq python routine, which is based on MINPACK's lmdif and
           lmder
           algorithms (fortran based).

           The objective function is :func:`residuals_LM`.
           The starting point parameters are self.guess.
           The Jacobian is given by :func:`LM_Jacobian`.

           The fit is performed on all parameters : Paczynski parameters and telescopes fluxes.

           :return: a tuple containing (fit_results, covariance_matrix, computation_time)
           :rtype: tuple

           **WARNING**:
                     ftol (relative error desired in the sum of squares) is set to 10^-8
                     maxfev (maximum number of function calls) is set to 50000
                     These limits can prevent the fit from converging properly (expected to be rare :))
        """
        starting_time = python_time.time()

        # use the analytical Jacobian (faster) if no second-order effects are present, else let the
        # algorithm estimate it numerically.
        if self.guess == []:
            self.guess = self.initial_guess()
        n_data = 0
        for telescope in self.event.telescopes:
            n_data = n_data + telescope.n_data('flux')

        n_parameters = len(self.model.model_dictionnary)

        if self.model.Jacobian_flag == 'OK':
            lmarquardt_fit = scipy.optimize.leastsq(self.residuals_LM,
                                                    self.guess,
                                                    maxfev=50000,
                                                    Dfun=self.LM_Jacobian,
                                                    col_deriv=0,
                                                    full_output=1,
                                                    ftol=10**-8,
                                                    xtol=10**-10,
                                                    gtol=10**-10)
            fit_result = lmarquardt_fit[0].tolist()
            fit_result.append(
                microltoolbox.chichi(self.residuals_LM, lmarquardt_fit[0]))

            try:
                # Try to extract the covariance matrix from the lmarquardt_fit output

                covariance_matrix = lmarquardt_fit[1] * fit_result[-1] / (
                    n_data - n_parameters)

            except:

                covariance_matrix = np.zeros(
                    (len(self.model.model_dictionnary),
                     len(self.model.model_dictionnary)))
        else:

            lmarquardt_fit = scipy.optimize.least_squares(
                self.residuals_LM,
                self.guess,
                method='lm',
                x_scale='jac',
                ftol=10**-10,
                xtol=10**-10,
                gtol=10**-10,
            )
            fit_result = lmarquardt_fit['x'].tolist()
            fit_result.append(
                microltoolbox.chichi(self.residuals_LM, lmarquardt_fit['x']))

            try:
                # Try to extract the covariance matrix from the lmarquardt_fit output
                jacobian = lmarquardt_fit['jac']

                covariance_matrix = np.linalg.inv(np.dot(jacobian.T, jacobian))

            except:

                covariance_matrix = np.zeros(
                    (len(self.model.model_dictionnary),
                     len(self.model.model_dictionnary)))

        computation_time = python_time.time() - starting_time

        # import pdb; pdb.set_trace()
        print(sys._getframe().f_code.co_name,
              ' : Levenberg_marquardt fit SUCCESS')
        print(fit_result)
        return fit_result, covariance_matrix, computation_time
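
    # Note on the covariance scaling above: scipy.optimize.leastsq returns the parameter
    # covariance up to an unknown scale, so it is rescaled by the reduced chi^2:
    #
    #     covariance_matrix = raw_covariance * chi2 / (n_data - n_parameters)
    #
    # where chi2 is the final sum of squared normalized residuals.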

    def residuals_LM(self, fit_process_parameters):
        """The normalized residuals associated to the model and parameters.

           :param list fit_process_parameters: the model parameters ingested by the correpsonding
                                               fitting routine.

           :return: a numpy array which represents the residuals_i for each telescope,
                    residuals_i=(data_i-model_i)/sigma_i
           :rtype: array_like
           The sum of square residuals gives chi^2.
        """

        pyLIMA_parameters = self.model.compute_pyLIMA_parameters(
            fit_process_parameters)

        residuals = np.array([])

        for telescope in self.event.telescopes:
            # Find the residuals of telescope observation regarding the parameters and model
            residus = self.model_residuals(telescope, pyLIMA_parameters)

            residuals = np.append(residuals, residus)

        # print python_time.time()-start

        return residuals

    def LM_Jacobian(self, fit_process_parameters):
        """Return the analytical Jacobian matrix, if requested by method LM.
        Available only for PSPL and FSPL without second_order.

        :param list fit_process_parameters: the model parameters ingested by the correpsonding
                                            fitting routine.
        :return: a numpy array which represents the jacobian matrix
        :rtype: array_like
        """

        pyLIMA_parameters = self.model.compute_pyLIMA_parameters(
            fit_process_parameters)

        count = 0
        # import pdb;
        # pdb.set_trace()
        for telescope in self.event.telescopes:

            if count == 0:

                _jacobi = self.model.model_Jacobian(telescope,
                                                    pyLIMA_parameters)

            else:

                _jacobi = np.c_[
                    _jacobi,
                    self.model.model_Jacobian(telescope, pyLIMA_parameters)]

            count += 1

        # The objective function is : (data-model)/errors

        _jacobi = -_jacobi
        jacobi = _jacobi[:-2]
        # Split the fs and g derivatives into separate columns corresponding to
        # each observatory
        start_index = 0
        dresdfs = _jacobi[-2]
        dresdg = _jacobi[-1]

        for telescope in self.event.telescopes:
            derivative_fs = np.zeros((len(dresdfs)))
            derivative_g = np.zeros((len(dresdg)))
            index = np.arange(
                start_index,
                start_index + len(telescope.lightcurve_flux[:, 0]))
            derivative_fs[index] = dresdfs[index]
            derivative_g[index] = dresdg[index]
            jacobi = np.r_[jacobi, np.array([derivative_fs, derivative_g])]

            start_index = index[-1] + 1

        return jacobi.T

    def trust_region_reflective(self):

        starting_time = python_time.time()

        # use the analytical Jacobian (faster) if no second-order effects are present, else let the
        # algorithm estimate it numerically.
        if self.guess == []:
            self.guess = self.initial_guess()

        bounds_min = [i[0] for i in self.model.parameters_boundaries
                      ] + [0, -np.inf] * len(self.event.telescopes)
        bounds_max = [i[1] for i in self.model.parameters_boundaries
                      ] + [np.inf, np.inf] * len(self.event.telescopes)

        if self.model.Jacobian_flag == 'OK':
            trf_fit = scipy.optimize.least_squares(self.residuals_LM,
                                                   self.guess,
                                                   max_nfev=50000,
                                                   jac=self.LM_Jacobian,
                                                   bounds=(bounds_min,
                                                           bounds_max),
                                                   ftol=10**-6,
                                                   xtol=10**-10,
                                                   gtol=10**-5)
        else:

            trf_fit = scipy.optimize.least_squares(self.residuals_LM,
                                                   self.guess,
                                                   max_nfev=50000,
                                                   bounds=(bounds_min,
                                                           bounds_max),
                                                   ftol=10**-6,
                                                   xtol=10**-10,
                                                   gtol=10**-5)
        computation_time = python_time.time() - starting_time

        fit_result = np.copy(trf_fit['x']).tolist()
        fit_result += [2 * trf_fit['cost']]

        try:

            jacobian = trf_fit['jac']

        except:

            jacobian = self.LM_Jacobian(fit_result)

        covariance_matrix = np.linalg.inv(np.dot(jacobian.T, jacobian))
        n_data = 0
        for telescope in self.event.telescopes:
            n_data = n_data + telescope.n_data('flux')

        n_parameters = len(self.model.model_dictionnary)
        covariance_matrix *= fit_result[-1] / (n_data - n_parameters)
        print(sys._getframe().f_code.co_name, ' : TRF fit SUCCESS')
        print(fit_result)
        return fit_result, covariance_matrix, computation_time

    def chichi_telescopes(self, fit_process_parameters):
        """Return a list of chi^2 (float) for individuals telescopes.

        :param list fit_process_parameters: the model parameters ingested by the correpsonding
                                            fitting routine.

        :returns: the chi^2 for each telescopes

        :rtype: list
        """

        residuals = self.residuals_LM(fit_process_parameters)
        chichi_list = []
        start_index = 0
        for telescope in self.event.telescopes:
            chichi_list.append(
                (residuals[start_index:start_index +
                           len(telescope.lightcurve_flux)]**2).sum())

            start_index += len(telescope.lightcurve_flux)

        return chichi_list

    def model_residuals(self, telescope, pyLIMA_parameters):
        """ Compute the residuals of a telescope lightcurve according to the model.

        :param object telescope: a telescope object. More details in telescopes module.
        :param object pyLIMA_parameters: object containing the model parameters, see microlmodels for more details

        :return: the residuals in flux
        :rtype: array_like
        """
        lightcurve = telescope.lightcurve_flux

        flux = lightcurve[:, 1]
        errflux = lightcurve[:, 2]

        microlensing_model = self.model.compute_the_microlensing_model(
            telescope, pyLIMA_parameters)

        residuals = (flux - microlensing_model[0]) / errflux

        return residuals

    def all_telescope_residuals(self, pyLIMA_parameters):
        """ Compute the residuals of all telescopes according to the model.

        :param object pyLIMA_parameters: object containing the model parameters, see microlmodels for more details

        :return: the residuals in flux,
        :rtype: list, a list of array of residuals in flux
        """

        residuals = []
        for telescope in self.event.telescopes:
            # Find the residuals of telescope observation regarding the parameters and model
            residus = self.model_residuals(telescope, pyLIMA_parameters)
            # no prior here
            residuals.append(residus)
        # print python_time.time()-start
        return residuals

    def find_fluxes(self, fit_process_parameters, model):
        """Find telescopes flux associated (fs,g) to the model. Used for initial_guess and LM
        method.

        :param list fit_process_parameters: the model parameters ingested by the correpsonding fitting
                                       routine.
        :param object model: a microlmodels which you want to compute the fs,g parameters.

        :return: a list of tuple with the (fs,g) telescopes flux parameters.
        :rtype: list
        """

        telescopes_fluxes = []
        pyLIMA_parameters = model.compute_pyLIMA_parameters(
            fit_process_parameters)

        for telescope in self.event.telescopes:

            flux = telescope.lightcurve_flux[:, 1]

            ml_model, f_source, f_blending = model.compute_the_microlensing_model(
                telescope, pyLIMA_parameters)

            # Prior here
            if f_source < 0:

                telescopes_fluxes.append(np.min(flux))
                telescopes_fluxes.append(0.0)
            else:
                telescopes_fluxes.append(f_source)
                telescopes_fluxes.append(f_blending)
        return telescopes_fluxes
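
    # Note on the prior in find_fluxes above: if the fitted source flux is negative, the
    # minimum observed flux is used instead and the blending is forced to zero, e.g.
    #
    #     fs, g = (np.min(flux), 0.0) if f_source < 0 else (f_source, f_blending)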

    def grids(self):
        """ Compute models on a grid. ON CONSTRUCTION.
        """
        parameters_on_the_grid = []

        for parameter_name in self.fix_parameters_dictionnary:
            parameter_range = self.model.parameters_boundaries[
                self.model.model_dictionnary[parameter_name]]

            parameters_on_the_grid.append(
                np.linspace(parameter_range[0], parameter_range[1],
                            self.grid_resolution))

        hyper_grid = self.construct_the_hyper_grid(parameters_on_the_grid)

        self.new_parameters_boundaries = self.redefine_parameters_boundaries()

        if self.pool is not None:
            computational_map = self.pool.map

        else:
            computational_map = map

        grid_results = list(
            computational_map(
                emcee.ensemble._function_wrapper(
                    self.optimization_on_grid_pixel, args=[], kwargs={}),
                hyper_grid))

        return np.array(grid_results)

    def optimization_on_grid_pixel(self, grid_pixel_parameters):

        differential_evolution_estimation = scipy.optimize.differential_evolution(
            self.chichi_grids,
            bounds=self.new_parameters_boundaries,
            args=tuple(grid_pixel_parameters.tolist()),
            mutation=(0.5, 1.0),
            popsize=10,
            maxiter=1000,
            tol=0.0,
            atol=0.1,
            strategy='rand1bin',
            recombination=0.7,
            polish=True,
            disp=True)

        best_parameters = self.reconstruct_fit_process_parameters(
            differential_evolution_estimation['x'], grid_pixel_parameters)

        best_parameters += [differential_evolution_estimation['fun']]

        print(
            sys._getframe().f_code.co_name, ' Grid step on ' +
            str(grid_pixel_parameters.tolist()).strip('[]') +
            ' converge to f(x) = ' +
            str(differential_evolution_estimation['fun']))

        return best_parameters

    def chichi_grids(self, moving_parameters, *fix_parameters):
        """ Compute chi^2. ON CONSTRUCTION.
        """
        fit_process_parameters = self.reconstruct_fit_process_parameters(
            moving_parameters, fix_parameters)

        pyLIMA_parameters = self.model.compute_pyLIMA_parameters(
            fit_process_parameters)

        chichi = 0.0
        for telescope in self.event.telescopes:
            # Find the residuals of telescope observation regarding the parameters and model

            residus = self.model_residuals(telescope, pyLIMA_parameters)

            chichi += (residus**2).sum()

        return chichi

    def reconstruct_fit_process_parameters(self, moving_parameters,
                                           fix_parameters):
        """ Reconstruc parameters. ON CONSTRUCTION.
        """
        fit_process_parameters = []

        for key in list(self.model.model_dictionnary.keys()
                        )[:len(self.model.parameters_boundaries)]:

            if key in self.moving_parameters_dictionnary:

                fit_process_parameters.append(
                    moving_parameters[self.moving_parameters_dictionnary[key]])

            else:

                fit_process_parameters.append(
                    fix_parameters[self.fix_parameters_dictionnary[key]])

        return fit_process_parameters

    def redefine_parameters_boundaries(self):
        """ Recompute the parameters boundaries. ON CONSTRUCTION.
        """
        parameters_boundaries = []
        self.moving_parameters_dictionnary = {}
        count = 0

        for indice, key in enumerate(
                list(self.model.model_dictionnary.keys())
            [:len(self.model.parameters_boundaries)]):

            if key not in self.fix_parameters_dictionnary.keys():
                parameters_boundaries.append(
                    self.model.parameters_boundaries[indice])
                self.moving_parameters_dictionnary[key] = count
                count += 1
        return parameters_boundaries

    def construct_the_hyper_grid(self, parameters):
        """Define the grid. ON CONSTRUCTION.
        """
        params = map(np.asarray, parameters)
        grid = np.broadcast_arrays(
            *[x[(slice(None), ) + (None, ) * i] for i, x in enumerate(params)])

        reformate_grid = np.vstack(grid).reshape(len(parameters), -1).T
        return reformate_grid
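
    # Illustrative sketch of construct_the_hyper_grid above: for
    # parameters = [[1, 2], [10, 20]] the returned grid is
    #
    #     [[ 1, 10],
    #      [ 2, 10],
    #      [ 1, 20],
    #      [ 2, 20]]
    #
    # i.e. one row per grid pixel, covering every combination of the input values.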

    def produce_outputs(self):
        """ Produce the standard outputs for a fit.
        More details in microloutputs module.
        """

        if self.method != 'MCMC':

            outputs = microloutputs.LM_outputs(self)

        else:

            outputs = microloutputs.MCMC_outputs(self)

        self.outputs = outputs

    def produce_fit_statistics(self):
        """ Produce the standard outputs for a fit.
        More details in microloutputs module.
        """

        stats_outputs = microloutputs.statistical_outputs(self)

        self.stats_outputs = stats_outputs

    def produce_pdf(self, output_directory):
        """ ON CONSTRUCTION
        """
        microloutputs.pdf_output(self, output_directory)
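
# Hedged usage sketch of the fitting workflow implemented above. The class and
# constructor names are assumptions (only the mlfit entry point, produce_outputs and
# produce_fit_statistics are referenced in the code above):
#
#     fit = MLFits(your_event)                  # hypothetical constructor
#     fit.mlfit(your_model, 'DE', 'polyfit')    # model, method, flux_estimation_MCMC
#     fit.produce_outputs()
#     fit.produce_fit_statistics()
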
Exemple #33
0
class Demo(object):
    def __init__(self):
        # rospy.init_node('demo')

        # self.move_base = MoveBaseClient()
        # self.torso_action = FollowTrajectoryClient("torso_controller", ["torso_lift_joint"])

        self.bridge = CvBridge()
        self.gazebo_client = GazeboClient(skip_models=None)
        self.head_action = PointHeadClient()
        self.grasping_client = GraspingClient()
        self.skipped = []
        self.m = 0
        rospy.wait_for_service('state', timeout=10.0)

    def cubes_bottom_top(self):
        self.is_top = []
        self.is_bottom = []
        self.combination = []
        self.all_sequence = []
        self.skipped = []
        # self.move_base.goto(.8, 0.0, 0.0)
        # self.torso_action.move_to([0.4, ])
        self.head_action.look_at(0.6, 0.1, 0.4, "map")
        self.grasping_client.tuck()
        rospy.sleep(1.0)

    def get_pose_init(self):
        model = self.get_model_states()
        self.cube_map_pose = OrderedDict()

        for i in range(len(model.name) - 6, len(model.name)):
            self.cube_map_pose[model.name[i]] = [
                model.pose[i].position.x, model.pose[i].position.y,
                model.pose[i].position.z
            ]

    def reset(self):
        state_data = 'error'
        while state_data == 'error':
            client = rospy.ServiceProxy('state', ImageState)
            state = client()

            if state.saved == 'success':
                with rosbag.Bag('state.bag', 'r') as inputbag:
                    for topic, msg, timestamp in inputbag.read_messages(
                            topics='/head_camera/rgb/image_color'):
                        image = self.bridge.imgmsg_to_cv2(
                            msg, desired_encoding='bgr8')
                        image = cv2.resize(image, (84, 84))
                        return image

    def get_model_states(self):
        model_state = False
        while not model_state:
            msg = rospy.wait_for_message('/gazebo/model_states',
                                         ModelStates,
                                         timeout=10.0)
            try:
                if msg:
                    return msg
            except:
                pass

    def step(self, seq_list):
        self.get_pose_init()
        update = False
        start = rospy.Time.now()
        try:
            while rospy.Time.now() - start <= rospy.Duration(3600.0):
                min_cube_detect = 7 - len(self.skipped) if self.skipped else 6
                max_cube_detect = 6
                detect_trial = 0
                detected = False
                while detect_trial <= 5.0:
                    self.grasping_client.updateScene()

                    if min_cube_detect <= len(
                            self.grasping_client.cubes) <= max_cube_detect:
                        detected = True
                        break
                    detect_trial += 1

                if not detected:
                    return None, None, True, False, 0, 0, 0, 0
                self.positions = OrderedDict()

                for i in range(len(self.grasping_client.cubes)):
                    pose = self.grasping_client.cubes[i]
                    error_list = []
                    # Base to map matrix transformation
                    ref_position = self.grasping_client.get_transform_pose(
                        pose)

                    for keys in self.cube_map_pose:
                        if keys not in self.positions:
                            error_list.append([
                                (ref_position.x -
                                 self.cube_map_pose[keys][0])**2,
                                (ref_position.y -
                                 self.cube_map_pose[keys][1])**2,
                                (ref_position.z -
                                 self.cube_map_pose[keys][2])**2
                            ])

                    error_list = np.array(error_list)

                    index = int(np.argmin(np.sum(error_list, axis=1)))

                    self.positions[list(self.cube_map_pose.keys())[index]] = pose
                    delete_keys = list(self.cube_map_pose.keys())[index]
                    del self.cube_map_pose[delete_keys]

                move_cube_start = cube_type[cube_idx[seq_list[0]]]
                move_cube_dest = cube_type[cube_idx[seq_list[1]]]
                selected = {'top': [], 'bottom': []}

                if (move_cube_start[0],
                        move_cube_dest[1]) in self.all_sequence:
                    next_state = self.reset()
                    reward = 0.0
                    terminal = False
                    update = True
                    # print('Already the sequence has been completed')
                    return next_state, reward, terminal, update, 0, 0, 0, 0

                if move_cube_start[0] not in self.is_bottom:
                    selected['top'] = [
                        move_cube_start[0], self.positions[move_cube_start[0]]
                    ]
                else:
                    # print('The start cube is on bottom')
                    next_state = self.reset()
                    reward = 0.0
                    terminal = False
                    update = True
                    # print('Already the sequence has been completed')
                    return next_state, reward, terminal, update, 0, 0, 0, 0

                if move_cube_dest[1] not in self.is_top and move_cube_dest[
                        1] not in self.is_bottom:
                    selected['bottom'] = [
                        move_cube_dest[1], self.positions[move_cube_dest[1]]
                    ]
                else:
                    next_state = self.reset()
                    reward = 0.0
                    terminal = False
                    update = True
                    # print('Already the sequence has been completed')
                    return next_state, reward, terminal, update, 0, 0, 0, 0

                sequence_object_list = [
                    list(self.positions.keys()).index(selected['top'][0]),
                    list(self.positions.keys()).index(selected['bottom'][0])
                ]

                pick_seq = self.grasping_client.get_sequence(
                    sequence_object_list)
                cube, grasps = pick_seq['start']
                if cube is None:
                    print('No cube')
                    return None, None, True, False, 0, 0, 0, 0

                picked = False

                grasp_attempt = 1
                if self.grasping_client.pick(cube, grasps):
                    picked = True
                if picked:
                    grasp_success = 1

                    placed = False

                    cube_target, grasps_target = pick_seq['target']
                    pose = PoseStamped()
                    pose.pose = cube_target.primitive_poses[0]
                    pose.pose.position.z += 0.09
                    pose.header.frame_id = cube_target.header.frame_id
                    if self.grasping_client.place(cube, pose):
                        placed = True
                    if placed:
                        self.grasping_client.tuck()
                        rospy.sleep(1.0)
                        if move_cube_start[0] not in self.skipped:
                            self.skipped.append(move_cube_start[0])
                        if move_cube_dest[1] not in self.skipped:
                            self.skipped.append(move_cube_dest[1])

                        model = self.get_model_states()
                        position_start = model.name.index(move_cube_start[0])
                        position_target = model.name.index(move_cube_dest[1])
                        gazebo_state_start = model.pose[position_start]
                        gazebo_state_target = model.pose[position_target]

                        if gazebo_state_start.position.z >= 0.45 and \
                          gazebo_state_target.position.x - 0.02 <= gazebo_state_start.position.x <= gazebo_state_target.position.x + 0.04 and \
                          gazebo_state_target.position.y - 0.02 <= gazebo_state_start.position.y <= gazebo_state_target.position.y + 0.04:

                            self.is_bottom.append(move_cube_dest[1])
                            if move_cube_start[0] not in self.is_top:
                                self.is_top.append(move_cube_start[0])
                            remove_index = None
                            for i in range(len(self.all_sequence)):
                                if move_cube_start[0] in self.all_sequence[i]:
                                    remove_index = i
                            if remove_index is not None:
                                self.all_sequence.pop(remove_index)

                            if (move_cube_start[0], move_cube_dest[1]
                                ) not in self.all_sequence:
                                self.all_sequence.append(
                                    (move_cube_start[0], move_cube_dest[1]))

                            reward = 1.0 if move_cube_start[
                                0][:-2] == move_cube_dest[1][:-2] else -1.0

                            new_state = self.reset()
                            terminal = len(self.all_sequence) == 3
                            update = True
                            # print('The cube has been placed correctly')
                            self.skipped.append(move_cube_start[0])
                            self.grasping_client.gripper_opening()
                            self.grasping_client.updateScene(
                                remove_collision=True)
                            return new_state, reward, terminal, update, 1, 1, 1, 1
                        #
                        else:
                            next_state = None
                            reward = None
                            terminal = True
                            update = False
                            self.grasping_client.gripper_opening()
                            # self.grasping_client.updateScene(remove_collision = True)
                            return next_state, reward, terminal, update, 1, 1, 1, 0
                    else:

                        self.grasping_client.gripper_opening()
                        self.grasping_client.updateScene(remove_collision=True)

                        next_state = None
                        reward = None
                        terminal = True
                        update = False
                        return next_state, reward, terminal, update, 1, 1, 1, 0
                #
                else:
                    self.grasping_client.gripper_opening()
                    # self.grasping_client.updateScene(remove_collision = True)
                    next_state = None
                    reward = 0.0
                    terminal = True
                    update = False

                    return next_state, reward, terminal, update, 1, 0, 0, 0

        except:
            return None, None, True, False, 0, 0, 0, 0
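
# Hedged usage sketch of the Demo class above (assumes a running ROS/Gazebo setup
# providing the 'state' service and '/gazebo/model_states' topic used in the code;
# cube_idx and cube_type are lookup tables defined outside this snippet):
#
#     demo = Demo()
#     demo.cubes_bottom_top()
#     state_image = demo.reset()   # 84x84 BGR image read back from state.bag
#     next_state, reward, terminal, update, _, _, _, _ = demo.step(seq_list)
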
Exemple #34
0
def plot_conditions(epochs,
                    conditions=OrderedDict(),
                    ci=97.5,
                    n_boot=1000,
                    title='',
                    palette=None,
                    ylim=(-6, 6),
                    diff_waveform=(1, 2)):
    """Plot ERP conditions.
    Args:
        epochs (mne.epochs): EEG epochs
    Keyword Args:
        conditions (OrderedDict): dictionary that contains the names of the
            conditions to plot as keys, and the list of corresponding marker
            numbers as value. E.g.,
                conditions = {'Non-target': [0, 1],
                               'Target': [2, 3, 4]}
        ci (float): confidence interval in range [0, 100]
        n_boot (int): number of bootstrap samples
        title (str): title of the figure
        palette (list): color palette to use for conditions
        ylim (tuple): (ymin, ymax)
        diff_waveform (tuple or None): tuple of ints indicating which
            conditions to subtract for producing the difference waveform.
            If None, do not plot a difference waveform
    Returns:
        (matplotlib.figure.Figure): figure object
        (list of matplotlib.axes._subplots.AxesSubplot): list of axes
    """
    if isinstance(conditions, dict):
        conditions = OrderedDict(conditions)

    if palette is None:
        palette = sns.color_palette("hls", len(conditions) + 1)

    X = epochs.get_data() * 1e6
    times = epochs.times
    y = pd.Series(epochs.events[:, -1])

    fig, axes = plt.subplots(2, 2, figsize=[12, 6], sharex=True, sharey=True)
    axes = [axes[1, 0], axes[0, 0], axes[0, 1], axes[1, 1]]

    for ch in range(4):
        for cond, color in zip(conditions.values(), palette):
            sns.tsplot(X[y.isin(cond), ch],
                       time=times,
                       color=color,
                       n_boot=n_boot,
                       ci=ci,
                       ax=axes[ch])

        if diff_waveform:
            diff = (np.nanmean(X[y == diff_waveform[1], ch], axis=0) -
                    np.nanmean(X[y == diff_waveform[0], ch], axis=0))
            axes[ch].plot(times, diff, color='k', lw=1)

        axes[ch].set_title(epochs.ch_names[ch])
        axes[ch].set_ylim(ylim)
        axes[ch].axvline(x=0,
                         ymin=ylim[0],
                         ymax=ylim[1],
                         color='k',
                         lw=1,
                         label='_nolegend_')

    axes[0].set_xlabel('Time (s)')
    axes[0].set_ylabel('Amplitude (uV)')
    axes[-1].set_xlabel('Time (s)')
    axes[1].set_ylabel('Amplitude (uV)')

    if diff_waveform:
        legend = (['{} - {}'.format(diff_waveform[1], diff_waveform[0])] +
                  list(conditions.keys()))
    else:
        legend = conditions.keys()
    axes[-1].legend(legend)
    sns.despine()
    plt.tight_layout()

    if title:
        fig.suptitle(title, fontsize=20)

    return fig, axes
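
# Hedged usage sketch (hypothetical `epochs` object; assumes mne, seaborn and matplotlib
# are imported as the function above requires):
#
#     conditions = OrderedDict([('Non-target', [0, 1]),
#                               ('Target', [2, 3, 4])])
#     fig, axes = plot_conditions(epochs, conditions=conditions,
#                                 ci=97.5, n_boot=1000, title='ERP by condition')
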
Exemple #35
0
class Module(object):
    r"""Base class for all neural network modules.

    Your models should also subclass this class.

    Modules can also contain other Modules, allowing to nest them in
    a tree structure. You can assign the submodules as regular attributes::

        import torch.nn as nn
        import torch.nn.functional as F

        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.conv1 = nn.Conv2d(1, 20, 5)
                self.conv2 = nn.Conv2d(20, 20, 5)

            def forward(self, x):
                x = F.relu(self.conv1(x))
                return F.relu(self.conv2(x))

    Submodules assigned in this way will be registered, and will have their
    parameters converted too when you call :meth:`to`, etc.
    """

    dump_patches = False

    r"""This allows better BC support for :meth:`load_state_dict`. In
    :meth:`state_dict`, the version number will be saved as in the attribute
    `_metadata` of the returned state dict, and thus pickled. `_metadata` is a
    dictionary with keys that follow the naming convention of state dict. See
    ``_load_from_state_dict`` on how to use this information in loading.

    If new parameters/buffers are added/removed from a module, this number shall
    be bumped, and the module's `_load_from_state_dict` method can compare the
    version number and do appropriate changes if the state dict is from before
    the change."""
    _version = 1

    def __init__(self):
        torch._C._log_api_usage_once("python.nn_module")
        self._backend = thnn_backend
        self._parameters = OrderedDict()
        self._buffers = OrderedDict()
        self._backward_hooks = OrderedDict()
        self._forward_hooks = OrderedDict()
        self._forward_pre_hooks = OrderedDict()
        self._state_dict_hooks = OrderedDict()
        self._load_state_dict_pre_hooks = OrderedDict()
        self._modules = OrderedDict()
        self.training = True

    def forward(self, *input):
        r"""Defines the computation performed at every call.

        Should be overridden by all subclasses.

        .. note::
            Although the recipe for forward pass needs to be defined within
            this function, one should call the :class:`Module` instance afterwards
            instead of this since the former takes care of running the
            registered hooks while the latter silently ignores them.
        """
        raise NotImplementedError

    def register_buffer(self, name, tensor):
        r"""Adds a persistent buffer to the module.

        This is typically used to register a buffer that should not be
        considered a model parameter. For example, BatchNorm's ``running_mean``
        is not a parameter, but is part of the persistent state.

        Buffers can be accessed as attributes using given names.

        Args:
            name (string): name of the buffer. The buffer can be accessed
                from this module using the given name
            tensor (Tensor): buffer to be registered.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """
        if '_buffers' not in self.__dict__:
            raise AttributeError(
                "cannot assign buffer before Module.__init__() call")
        elif not isinstance(name, torch._six.string_classes):
            raise TypeError("buffer name should be a string. "
                            "Got {}".format(torch.typename(name)))
        elif '.' in name:
            raise KeyError("buffer name can't contain \".\"")
        elif name == '':
            raise KeyError("buffer name can't be empty string \"\"")
        elif hasattr(self, name) and name not in self._buffers:
            raise KeyError("attribute '{}' already exists".format(name))
        elif tensor is not None and not isinstance(tensor, torch.Tensor):
            raise TypeError("cannot assign '{}' object to buffer '{}' "
                            "(torch Tensor or None required)"
                            .format(torch.typename(tensor), name))
        else:
            self._buffers[name] = tensor

    def register_parameter(self, name, param):
        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using given name.

        Args:
            name (string): name of the parameter. The parameter can be accessed
                from this module using the given name
            param (Parameter): parameter to be added to the module.
        """
        if '_parameters' not in self.__dict__:
            raise AttributeError(
                "cannot assign parameter before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):
            raise TypeError("parameter name should be a string. "
                            "Got {}".format(torch.typename(name)))
        elif '.' in name:
            raise KeyError("parameter name can't contain \".\"")
        elif name == '':
            raise KeyError("parameter name can't be empty string \"\"")
        elif hasattr(self, name) and name not in self._parameters:
            raise KeyError("attribute '{}' already exists".format(name))

        if param is None:
            self._parameters[name] = None
        elif not isinstance(param, Parameter):
            raise TypeError("cannot assign '{}' object to parameter '{}' "
                            "(torch.nn.Parameter or None required)"
                            .format(torch.typename(param), name))
        elif param.grad_fn:
            raise ValueError(
                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "
                "parameters must be created explicitly. To express '{0}' "
                "as a function of another Tensor, compute the value in "
                "the forward() method.".format(name))
        else:
            self._parameters[name] = param
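
    # Hedged example (uses the Parameter class imported by this module, as checked in
    # the isinstance test above); typical usage inside a module's __init__:
    #
    #     self.register_parameter('bias', Parameter(torch.zeros(20)))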

    def add_module(self, name, module):
        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:
            name (string): name of the child module. The child module can be
                accessed from this module using the given name
            module (Module): child module to be added to the module.
        """
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(
                torch.typename(module)))
        elif not isinstance(name, torch._six.string_classes):
            raise TypeError("module name should be a string. Got {}".format(
                torch.typename(name)))
        elif hasattr(self, name) and name not in self._modules:
            raise KeyError("attribute '{}' already exists".format(name))
        elif '.' in name:
            raise KeyError("module name can't contain \".\"")
        elif name == '':
            raise KeyError("module name can't be empty string \"\"")
        self._modules[name] = module
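
    # Hedged example (nn.Conv2d as in the class docstring above); typical usage inside
    # a module's __init__:
    #
    #     self.add_module('conv1', nn.Conv2d(1, 20, 5))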

    def _apply(self, fn):
        for module in self.children():
            module._apply(fn)

        def compute_should_use_set_data(tensor, tensor_applied):
            if torch._has_same_tensorimpl_type(tensor, tensor_applied):
                # If the new tensor has the same TensorImpl type as the existing tensor,
                # the current behavior is to change the tensor in-place using `.data =`,
                # and the future behavior is to overwrite the existing tensor. However,
                # changing the current behavior is a BC-breaking change, and we want it
                # to happen in future releases. So for now we introduce the
                # `torch.__future__.get_overwrite_module_params_on_conversion()`
                # global flag to let the user control whether they want the future
                # behavior of overwriting the existing tensor or not.
                return not torch.__future__.get_overwrite_module_params_on_conversion()
            else:
                return False

        for key, param in self._parameters.items():
            if param is not None:
                # Tensors stored in modules are graph leaves, and we don't want to
                # track autograd history of `param_applied`, so we have to use
                # `with torch.no_grad():`
                with torch.no_grad():
                    param_applied = fn(param)
                should_use_set_data = compute_should_use_set_data(param, param_applied)
                if should_use_set_data:
                    param.data = param_applied
                else:
                    assert isinstance(param, Parameter)
                    assert param.is_leaf
                    self._parameters[key] = Parameter(param_applied, param.requires_grad)

                if param.grad is not None:
                    with torch.no_grad():
                        grad_applied = fn(param.grad)
                    should_use_set_data = compute_should_use_set_data(param.grad, grad_applied)
                    if should_use_set_data:
                        param.grad.data = grad_applied
                    else:
                        assert param.grad.is_leaf
                        self._parameters[key].grad = grad_applied.requires_grad_(param.grad.requires_grad)

        for key, buf in self._buffers.items():
            if buf is not None:
                self._buffers[key] = fn(buf)

        return self

    def apply(self, fn):
        r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)
        as well as self. Typical use includes initializing the parameters of a model
        (see also :ref:`torch-nn-init`).

        Args:
            fn (:class:`Module` -> None): function to be applied to each submodule

        Returns:
            Module: self

        Example::

            >>> def init_weights(m):
            >>>     print(m)
            >>>     if type(m) == nn.Linear:
            >>>         m.weight.data.fill_(1.0)
            >>>         print(m.weight)
            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
            >>> net.apply(init_weights)
            Linear(in_features=2, out_features=2, bias=True)
            Parameter containing:
            tensor([[ 1.,  1.],
                    [ 1.,  1.]])
            Linear(in_features=2, out_features=2, bias=True)
            Parameter containing:
            tensor([[ 1.,  1.],
                    [ 1.,  1.]])
            Sequential(
              (0): Linear(in_features=2, out_features=2, bias=True)
              (1): Linear(in_features=2, out_features=2, bias=True)
            )
            Sequential(
              (0): Linear(in_features=2, out_features=2, bias=True)
              (1): Linear(in_features=2, out_features=2, bias=True)
            )
        """
        for module in self.children():
            module.apply(fn)
        fn(self)
        return self

    def cuda(self, device=None):
        r"""Moves all model parameters and buffers to the GPU.

        This also makes associated parameters and buffers different objects. So
        it should be called before constructing optimizer if the module will
        live on GPU while being optimized.

        Arguments:
            device (int, optional): if specified, all parameters will be
                copied to that device

        Returns:
            Module: self
        """
        return self._apply(lambda t: t.cuda(device))

    def cpu(self):
        r"""Moves all model parameters and buffers to the CPU.

        Returns:
            Module: self
        """
        return self._apply(lambda t: t.cpu())

    def type(self, dst_type):
        r"""Casts all parameters and buffers to :attr:`dst_type`.

        Arguments:
            dst_type (type or string): the desired type

        Returns:
            Module: self
        """
        return self._apply(lambda t: t.type(dst_type))

    def float(self):
        r"""Casts all floating point parameters and buffers to float datatype.

        Returns:
            Module: self
        """
        return self._apply(lambda t: t.float() if t.is_floating_point() else t)

    def double(self):
        r"""Casts all floating point parameters and buffers to ``double`` datatype.

        Returns:
            Module: self
        """
        return self._apply(lambda t: t.double() if t.is_floating_point() else t)

    def half(self):
        r"""Casts all floating point parameters and buffers to ``half`` datatype.

        Returns:
            Module: self
        """
        return self._apply(lambda t: t.half() if t.is_floating_point() else t)

    def to(self, *args, **kwargs):
        r"""Moves and/or casts the parameters and buffers.

        This can be called as

        .. function:: to(device=None, dtype=None, non_blocking=False)

        .. function:: to(dtype, non_blocking=False)

        .. function:: to(tensor, non_blocking=False)

        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
        floating point :attr:`dtype`\ s. In addition, this method will only cast
        the floating point parameters and buffers to :attr:`dtype` (if given).
        The integral parameters and buffers will be moved to :attr:`device`, if
        that is given, but with dtypes unchanged. When :attr:`non_blocking` is
        set, it tries to convert/move asynchronously with respect to the host if
        possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

        See below for examples.

        .. note::
            This method modifies the module in-place.

        Args:
            device (:class:`torch.device`): the desired device of the parameters
                and buffers in this module
            dtype (:class:`torch.dtype`): the desired floating point type of
                the floating point parameters and buffers in this module
            tensor (torch.Tensor): Tensor whose dtype and device are the desired
                dtype and device for all parameters and buffers in this module

        Returns:
            Module: self

        Example::

            >>> linear = nn.Linear(2, 2)
            >>> linear.weight
            Parameter containing:
            tensor([[ 0.1913, -0.3420],
                    [-0.5113, -0.2325]])
            >>> linear.to(torch.double)
            Linear(in_features=2, out_features=2, bias=True)
            >>> linear.weight
            Parameter containing:
            tensor([[ 0.1913, -0.3420],
                    [-0.5113, -0.2325]], dtype=torch.float64)
            >>> gpu1 = torch.device("cuda:1")
            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
            Linear(in_features=2, out_features=2, bias=True)
            >>> linear.weight
            Parameter containing:
            tensor([[ 0.1914, -0.3420],
                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
            >>> cpu = torch.device("cpu")
            >>> linear.to(cpu)
            Linear(in_features=2, out_features=2, bias=True)
            >>> linear.weight
            Parameter containing:
            tensor([[ 0.1914, -0.3420],
                    [-0.5112, -0.2324]], dtype=torch.float16)

        """

        device, dtype, non_blocking = torch._C._nn._parse_to(*args, **kwargs)

        if dtype is not None:
            if not dtype.is_floating_point:
                raise TypeError('nn.Module.to only accepts floating point '
                                'dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):
            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)

        return self._apply(convert)

    def register_backward_hook(self, hook):
        r"""Registers a backward hook on the module.

        The hook will be called every time the gradients with respect to module
        inputs are computed. The hook should have the following signature::

            hook(module, grad_input, grad_output) -> Tensor or None

        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the
        module has multiple inputs or outputs. The hook should not modify its
        arguments, but it can optionally return a new gradient with respect to
        input that will be used in place of :attr:`grad_input` in subsequent
        computations.

        Returns:
            :class:`torch.utils.hooks.RemovableHandle`:
                a handle that can be used to remove the added hook by calling
                ``handle.remove()``

        .. warning ::

            The current implementation will not have the presented behavior
            for complex :class:`Module` that perform many operations.
            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only
            contain the gradients for a subset of the inputs and outputs.
            For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`
            directly on a specific input or output to get the required gradients.

        """
        handle = hooks.RemovableHandle(self._backward_hooks)
        self._backward_hooks[handle.id] = hook
        return handle

    def register_forward_pre_hook(self, hook):
        r"""Registers a forward pre-hook on the module.

        The hook will be called every time before :func:`forward` is invoked.
        It should have the following signature::

            hook(module, input) -> None

        The hook should not modify the input.

        Returns:
            :class:`torch.utils.hooks.RemovableHandle`:
                a handle that can be used to remove the added hook by calling
                ``handle.remove()``
        """
        handle = hooks.RemovableHandle(self._forward_pre_hooks)
        self._forward_pre_hooks[handle.id] = hook
        return handle

    def register_forward_hook(self, hook):
        r"""Registers a forward hook on the module.

        The hook will be called every time after :func:`forward` has computed an output.
        It should have the following signature::

            hook(module, input, output) -> None

        The hook should not modify the input or output.

        Returns:
            :class:`torch.utils.hooks.RemovableHandle`:
                a handle that can be used to remove the added hook by calling
                ``handle.remove()``
        """
        handle = hooks.RemovableHandle(self._forward_hooks)
        self._forward_hooks[handle.id] = hook
        return handle

    def _tracing_name(self, tracing_state):
        if not tracing_state._traced_module_stack:
            return None
        module = tracing_state._traced_module_stack[-1]
        for name, child in module.named_children():
            if child is self:
                return name
        return None

    def _slow_forward(self, *input, **kwargs):
        tracing_state = torch._C._get_tracing_state()
        if not tracing_state:
            return self.forward(*input, **kwargs)
        if not hasattr(tracing_state, '_traced_module_stack'):
            tracing_state._traced_module_stack = []
        name = self._tracing_name(tracing_state)
        if name:
            tracing_state.push_scope('%s[%s]' % (self._get_name(), name))
        else:
            tracing_state.push_scope(self._get_name())
        tracing_state._traced_module_stack.append(self)
        try:
            result = self.forward(*input, **kwargs)
        finally:
            tracing_state.pop_scope()
            tracing_state._traced_module_stack.pop()
        return result

    def __call__(self, *input, **kwargs):
        for hook in self._forward_pre_hooks.values():
            hook(self, input)
        if torch._C._get_tracing_state():
            result = self._slow_forward(*input, **kwargs)
        else:
            result = self.forward(*input, **kwargs)
        for hook in self._forward_hooks.values():
            hook_result = hook(self, input, result)
            if hook_result is not None:
                raise RuntimeError(
                    "forward hooks should never return any values, but '{}' "
                    "didn't return None".format(hook))
        if len(self._backward_hooks) > 0:
            var = result
            while not isinstance(var, torch.Tensor):
                if isinstance(var, dict):
                    var = next((v for v in var.values() if isinstance(v, torch.Tensor)))
                else:
                    var = var[0]
            grad_fn = var.grad_fn
            if grad_fn is not None:
                for hook in self._backward_hooks.values():
                    wrapper = functools.partial(hook, self)
                    functools.update_wrapper(wrapper, hook)
                    grad_fn.register_hook(wrapper)
        return result

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Support loading old checkpoints that don't have the following attrs:
        if '_forward_pre_hooks' not in self.__dict__:
            self._forward_pre_hooks = OrderedDict()
        if '_state_dict_hooks' not in self.__dict__:
            self._state_dict_hooks = OrderedDict()
        if '_load_state_dict_pre_hooks' not in self.__dict__:
            self._load_state_dict_pre_hooks = OrderedDict()

    def __getattr__(self, name):
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return _parameters[name]
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return _buffers[name]
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return modules[name]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, name))

    def __setattr__(self, name, value):
        def remove_from(*dicts):
            for d in dicts:
                if name in d:
                    del d[name]

        params = self.__dict__.get('_parameters')
        if isinstance(value, Parameter):
            if params is None:
                raise AttributeError(
                    "cannot assign parameters before Module.__init__() call")
            remove_from(self.__dict__, self._buffers, self._modules)
            self.register_parameter(name, value)
        elif params is not None and name in params:
            if value is not None:
                raise TypeError("cannot assign '{}' as parameter '{}' "
                                "(torch.nn.Parameter or None expected)"
                                .format(torch.typename(value), name))
            self.register_parameter(name, value)
        else:
            modules = self.__dict__.get('_modules')
            if isinstance(value, Module):
                if modules is None:
                    raise AttributeError(
                        "cannot assign module before Module.__init__() call")
                remove_from(self.__dict__, self._parameters, self._buffers)
                modules[name] = value
            elif modules is not None and name in modules:
                if value is not None:
                    raise TypeError("cannot assign '{}' as child module '{}' "
                                    "(torch.nn.Module or None expected)"
                                    .format(torch.typename(value), name))
                modules[name] = value
            else:
                buffers = self.__dict__.get('_buffers')
                if buffers is not None and name in buffers:
                    if value is not None and not isinstance(value, torch.Tensor):
                        raise TypeError("cannot assign '{}' as buffer '{}' "
                                        "(torch.Tensor or None expected)"
                                        .format(torch.typename(value), name))
                    buffers[name] = value
                else:
                    object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name in self._parameters:
            del self._parameters[name]
        elif name in self._buffers:
            del self._buffers[name]
        elif name in self._modules:
            del self._modules[name]
        else:
            object.__delattr__(self, name)

    def _register_state_dict_hook(self, hook):
        r"""These hooks will be called with arguments: `self`, `state_dict`,
        `prefix`, `local_metadata`, after the `state_dict` of `self` is set.
        Note that only parameters and buffers of `self` or its children are
        guaranteed to exist in `state_dict`. The hooks may modify `state_dict`
        inplace or return a new one.
        """
        handle = hooks.RemovableHandle(self._state_dict_hooks)
        self._state_dict_hooks[handle.id] = hook
        return handle

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        r"""Returns a dictionary containing a whole state of the module.

        Both parameters and persistent buffers (e.g. running averages) are
        included. Keys are corresponding parameter and buffer names.

        Returns:
            dict:
                a dictionary containing the whole state of the module

        Example::

            >>> module.state_dict().keys()
            ['bias', 'weight']

        """
        if destination is None:
            destination = OrderedDict()
            destination._metadata = OrderedDict()
        destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version)
        for name, param in self._parameters.items():
            if param is not None:
                destination[prefix + name] = param if keep_vars else param.data
        for name, buf in self._buffers.items():
            if buf is not None:
                destination[prefix + name] = buf if keep_vars else buf.data
        for name, module in self._modules.items():
            if module is not None:
                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)
        for hook in self._state_dict_hooks.values():
            hook_result = hook(self, destination, prefix, local_metadata)
            if hook_result is not None:
                destination = hook_result
        return destination

    def _register_load_state_dict_pre_hook(self, hook):
        r"""These hooks will be called with arguments: `state_dict`, `prefix`,
        `local_metadata`, `strict`, `missing_keys`, `unexpected_keys`,
        `error_msgs`, before loading `state_dict` into `self`. These arguments
        are exactly the same as those of `_load_from_state_dict`.
        """
        handle = hooks.RemovableHandle(self._load_state_dict_pre_hooks)
        self._load_state_dict_pre_hooks[handle.id] = hook
        return handle

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        r"""Copies parameters and buffers from :attr:`state_dict` into only
        this module, but not its descendants. This is called on every submodule
        in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
        module in input :attr:`state_dict` is provided as :attr:`local_metadata`.
        For state dicts without metadata, :attr:`local_metadata` is empty.
        Subclasses can achieve class-specific backward compatible loading using
        the version number at `local_metadata.get("version", None)`.

        .. note::
            :attr:`state_dict` is not the same object as the input
            :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
            it can be modified.

        Arguments:
            state_dict (dict): a dict containing parameters and
                persistent buffers.
            prefix (str): the prefix for parameters and buffers used in this
                module
            local_metadata (dict): a dict containing the metadata for this module.
            strict (bool): whether to strictly enforce that the keys in
                :attr:`state_dict` with :attr:`prefix` match the names of
                parameters and buffers in this module
            missing_keys (list of str): if ``strict=True``, add missing keys to
                this list
            unexpected_keys (list of str): if ``strict=True``, add unexpected
                keys to this list
            error_msgs (list of str): error messages should be added to this
                list, and will be reported together in
                :meth:`~torch.nn.Module.load_state_dict`
        """
        for hook in self._load_state_dict_pre_hooks.values():
            hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

        local_name_params = itertools.chain(self._parameters.items(), self._buffers.items())
        local_state = {k: v.data for k, v in local_name_params if v is not None}

        for name, param in local_state.items():
            key = prefix + name
            if key in state_dict:
                input_param = state_dict[key]

                # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
                if len(param.shape) == 0 and len(input_param.shape) == 1:
                    input_param = input_param[0]

                if input_param.shape != param.shape:
                    # local shape should match the one in checkpoint
                    error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, '
                                      'the shape in current model is {}.'
                                      .format(key, input_param.shape, param.shape))
                    continue

                if isinstance(input_param, Parameter):
                    # backwards compatibility for serialized parameters
                    input_param = input_param.data
                try:
                    param.copy_(input_param)
                except Exception:
                    error_msgs.append('While copying the parameter named "{}", '
                                      'whose dimensions in the model are {} and '
                                      'whose dimensions in the checkpoint are {}.'
                                      .format(key, param.size(), input_param.size()))
            elif strict:
                missing_keys.append(key)

        if strict:
            for key in state_dict.keys():
                if key.startswith(prefix):
                    input_name = key[len(prefix):]
                    input_name = input_name.split('.', 1)[0]  # get the name of param/buffer/child
                    if input_name not in self._modules and input_name not in local_state:
                        unexpected_keys.append(key)

    def load_state_dict(self, state_dict, strict=True):
        r"""Copies parameters and buffers from :attr:`state_dict` into
        this module and its descendants. If :attr:`strict` is ``True``, then
        the keys of :attr:`state_dict` must exactly match the keys returned
        by this module's :meth:`~torch.nn.Module.state_dict` function.

        Arguments:
            state_dict (dict): a dict containing parameters and
                persistent buffers.
            strict (bool, optional): whether to strictly enforce that the keys
                in :attr:`state_dict` match the keys returned by this module's
                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

        Returns:
            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
                * **missing_keys** is a list of str containing the missing keys
                * **unexpected_keys** is a list of str containing the unexpected keys
        """
        missing_keys = []
        unexpected_keys = []
        error_msgs = []

        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')

        load(self)
        load = None  # break load->load reference cycle

        if strict:
            if len(unexpected_keys) > 0:
                error_msgs.insert(
                    0, 'Unexpected key(s) in state_dict: {}. '.format(
                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))
            if len(missing_keys) > 0:
                error_msgs.insert(
                    0, 'Missing key(s) in state_dict: {}. '.format(
                        ', '.join('"{}"'.format(k) for k in missing_keys)))

        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                               self.__class__.__name__, "\n\t".join(error_msgs)))
        return _IncompatibleKeys(missing_keys, unexpected_keys)

    def _named_members(self, get_members_fn, prefix='', recurse=True):
        r"""Helper method for yielding various names + members of modules."""
        memo = set()
        modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
        for module_prefix, module in modules:
            members = get_members_fn(module)
            for k, v in members:
                if v is None or v in memo:
                    continue
                memo.add(v)
                name = module_prefix + ('.' if module_prefix else '') + k
                yield name, v

    def parameters(self, recurse=True):
        r"""Returns an iterator over module parameters.

        This is typically passed to an optimizer.

        Args:
            recurse (bool): if True, then yields parameters of this module
                and all submodules. Otherwise, yields only parameters that
                are direct members of this module.

        Yields:
            Parameter: module parameter

        Example::

            >>> for param in model.parameters():
            >>>     print(type(param.data), param.size())
            <class 'torch.FloatTensor'> (20L,)
            <class 'torch.FloatTensor'> (20L, 1L, 5L, 5L)

        """
        for name, param in self.named_parameters(recurse=recurse):
            yield param

    def named_parameters(self, prefix='', recurse=True):
        r"""Returns an iterator over module parameters, yielding both the
        name of the parameter as well as the parameter itself.

        Args:
            prefix (str): prefix to prepend to all parameter names.
            recurse (bool): if True, then yields parameters of this module
                and all submodules. Otherwise, yields only parameters that
                are direct members of this module.

        Yields:
            (string, Parameter): Tuple containing the name and parameter

        Example::

            >>> for name, param in self.named_parameters():
            >>>    if name in ['bias']:
            >>>        print(param.size())

        """
        gen = self._named_members(
            lambda module: module._parameters.items(),
            prefix=prefix, recurse=recurse)
        for elem in gen:
            yield elem

    def buffers(self, recurse=True):
        r"""Returns an iterator over module buffers.

        Args:
            recurse (bool): if True, then yields buffers of this module
                and all submodules. Otherwise, yields only buffers that
                are direct members of this module.

        Yields:
            torch.Tensor: module buffer

        Example::

            >>> for buf in model.buffers():
            >>>     print(type(buf.data), buf.size())
            <class 'torch.FloatTensor'> (20L,)
            <class 'torch.FloatTensor'> (20L, 1L, 5L, 5L)

        """
        for name, buf in self.named_buffers(recurse=recurse):
            yield buf

    def named_buffers(self, prefix='', recurse=True):
        r"""Returns an iterator over module buffers, yielding both the
        name of the buffer as well as the buffer itself.

        Args:
            prefix (str): prefix to prepend to all buffer names.
            recurse (bool): if True, then yields buffers of this module
                and all submodules. Otherwise, yields only buffers that
                are direct members of this module.

        Yields:
            (string, torch.Tensor): Tuple containing the name and buffer

        Example::

            >>> for name, buf in self.named_buffers():
            >>>    if name in ['running_var']:
            >>>        print(buf.size())

        """
        gen = self._named_members(
            lambda module: module._buffers.items(),
            prefix=prefix, recurse=recurse)
        for elem in gen:
            yield elem

    def children(self):
        r"""Returns an iterator over immediate children modules.

        Yields:
            Module: a child module
        """
        for name, module in self.named_children():
            yield module

    def named_children(self):
        r"""Returns an iterator over immediate children modules, yielding both
        the name of the module as well as the module itself.

        Yields:
            (string, Module): Tuple containing a name and child module

        Example::

            >>> for name, module in model.named_children():
            >>>     if name in ['conv4', 'conv5']:
            >>>         print(module)

        """
        memo = set()
        for name, module in self._modules.items():
            if module is not None and module not in memo:
                memo.add(module)
                yield name, module

    def modules(self):
        r"""Returns an iterator over all modules in the network.

        Yields:
            Module: a module in the network

        Note:
            Duplicate modules are returned only once. In the following
            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)
            >>> net = nn.Sequential(l, l)
            >>> for idx, m in enumerate(net.modules()):
                    print(idx, '->', m)

            0 -> Sequential(
              (0): Linear(in_features=2, out_features=2, bias=True)
              (1): Linear(in_features=2, out_features=2, bias=True)
            )
            1 -> Linear(in_features=2, out_features=2, bias=True)

        """
        for name, module in self.named_modules():
            yield module

    def named_modules(self, memo=None, prefix=''):
        r"""Returns an iterator over all modules in the network, yielding
        both the name of the module as well as the module itself.

        Yields:
            (string, Module): Tuple of name and module

        Note:
            Duplicate modules are returned only once. In the following
            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)
            >>> net = nn.Sequential(l, l)
            >>> for idx, m in enumerate(net.named_modules()):
                    print(idx, '->', m)

            0 -> ('', Sequential(
              (0): Linear(in_features=2, out_features=2, bias=True)
              (1): Linear(in_features=2, out_features=2, bias=True)
            ))
            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

        """

        if memo is None:
            memo = set()
        if self not in memo:
            memo.add(self)
            yield prefix, self
            for name, module in self._modules.items():
                if module is None:
                    continue
                submodule_prefix = prefix + ('.' if prefix else '') + name
                for m in module.named_modules(memo, submodule_prefix):
                    yield m

    def train(self, mode=True):
        r"""Sets the module in training mode.

        This has an effect only on certain modules. See the documentation of
        particular modules for details of their behavior in training/evaluation
        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
        etc.

        Returns:
            Module: self
        """
        self.training = mode
        for module in self.children():
            module.train(mode)
        return self

    def eval(self):
        r"""Sets the module in evaluation mode.

        This has an effect only on certain modules. See the documentation of
        particular modules for details of their behavior in training/evaluation
        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
        etc.
        """
        return self.train(False)

    def zero_grad(self):
        r"""Sets gradients of all model parameters to zero."""
        for p in self.parameters():
            if p.grad is not None:
                p.grad.detach_()
                p.grad.zero_()

    def share_memory(self):
        return self._apply(lambda t: t.share_memory_())

    def _get_name(self):
        return self.__class__.__name__

    def extra_repr(self):
        r"""Set the extra representation of the module

        To print customized extra information, you should reimplement
        this method in your own modules. Both single-line and multi-line
        strings are acceptable.
        """
        return ''

    def __repr__(self):
        # We treat the extra repr like the sub-module, one item per line
        extra_lines = []
        extra_repr = self.extra_repr()
        # empty string will be split into list ['']
        if extra_repr:
            extra_lines = extra_repr.split('\n')
        child_lines = []
        for key, module in self._modules.items():
            mod_str = repr(module)
            mod_str = _addindent(mod_str, 2)
            child_lines.append('(' + key + '): ' + mod_str)
        lines = extra_lines + child_lines

        main_str = self._get_name() + '('
        if lines:
            # simple one-liner info, which most builtin Modules will use
            if len(extra_lines) == 1 and not child_lines:
                main_str += extra_lines[0]
            else:
                main_str += '\n  ' + '\n  '.join(lines) + '\n'

        main_str += ')'
        return main_str

    def __dir__(self):
        module_attrs = dir(self.__class__)
        attrs = list(self.__dict__.keys())
        parameters = list(self._parameters.keys())
        modules = list(self._modules.keys())
        buffers = list(self._buffers.keys())
        keys = module_attrs + attrs + parameters + modules + buffers

        # Eliminate attrs that are not legal Python variable names
        keys = [key for key in keys if not key[0].isdigit()]

        return sorted(keys)
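
The ``Module`` methods above (``apply``, ``to``, forward hooks and ``state_dict``/``load_state_dict``) compose into a common initialize-inspect-save workflow. A minimal sketch of that workflow, assuming PyTorch is installed; the layer sizes, the ``init_weights`` helper and the hook body are illustrative only:

import torch
import torch.nn as nn

def init_weights(m):
    # apply() visits every submodule, so a type check picks out the layers to touch.
    if isinstance(m, nn.Linear):
        m.weight.data.fill_(0.5)

net = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
net.apply(init_weights)                            # recursive initialization
net.to(torch.device('cpu'), dtype=torch.float32)   # move/cast parameters in place

# Forward hooks observe each child's output; per __call__ above they must return None.
handles = [child.register_forward_hook(
               lambda mod, inp, out: print(type(mod).__name__, tuple(out.shape)))
           for child in net.children()]
net(torch.randn(3, 4))
for h in handles:
    h.remove()                                     # hooks are removed via their handles

# Round-trip parameters and buffers through a state dict.
state = net.state_dict()
fresh = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
missing, unexpected = fresh.load_state_dict(state, strict=True)
assert not missing and not unexpected
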
Exemple #36
0
class PaginatorSession:
  def __init__(self, bot, ctx:Context, timeout=60, pages:Optional[List[discord.Embed]]=None, footer:Union[str, None]=None, delete_after:bool=True):
    self.bot = bot

    self.footer = footer  # footer message
    self.ctx = ctx  # ctx
    self.timeout = timeout  # when the reactions get cleared, int[seconds]
    self.pages = pages  # the list of embeds list[discord.Embed, discord.Embed]
    self.running = False  # currently running, bool
    self.message = None  # current message being paginated, discord.Message
    self.current = 0  # current page index, int
    self.delete_after = delete_after

    self.reactions = OrderedDict({
      '⏮': self.__first_page,
      '◀': self.__previous_page,
      '⏹': self.__close,
      '▶': self.__next_page,
      '⏭': self.__last_page
    })

  def __valid_page(self, index):
    val_check = 0 <= index < len(self.pages)
    return val_check  # checks if input index is valid

  async def __show_page(self, index: int):
    if not self.__valid_page(index):
      return  # checks for a valid page

    self.current = index
    page = self.pages[index]  # gets the page
    if self.footer is not None:
      page.set_footer(text=self.footer)  # sets footer

    page.set_author(name=f"{index + 1}/{len(self.pages)}")

    if self.running:
      # if the first embed was sent, it edits it
      await self.message.edit(embed=page)
    else:
      self.running = True
      # sends the message
      self.message = await self.ctx.send(embed=page)

      # adds reactions
      for reaction in self.reactions.keys():
        if len(self.pages) <= 2 and reaction in '⏮⏭':
          continue  # ignores 2 page embed first and last emojis
        if len(self.pages) == 1 and reaction != '⏹':
          continue
        await self.message.add_reaction(reaction)

  def __react_check(self, reaction, user):
    if reaction.message.id != self.message.id:
      return False  # not the same message
    if user.id != self.ctx.author.id:
      return False  # not the same user
    if reaction.emoji in self.reactions.keys():
      return True  # reaction was one of the pagination emojis

  async def run(self):
    if not self.running:
      await self.__show_page(0)

    while self.running:
      try:
        # waits for reaction using react_check
        reaction, user = await self.ctx.bot.wait_for('reaction_add', check=self.__react_check, timeout=self.timeout)

        try:
          await self.message.remove_reaction(reaction, user)
        except:
          pass

        try:
          action = self.reactions[reaction.emoji]
          if action is not None:
            await action()
        except:
          pass
      except asyncio.TimeoutError:
        self.running = False
        await self.__close()
        break

  # all functions with await must be async
  async def __first_page(self):
    if self.current == 0: return
    return await self.__show_page(0)

  async def __last_page(self):
    if self.current == len(self.pages) - 1: return
    return await self.__show_page(len(self.pages) - 1)

  async def __next_page(self):
    return await self.__show_page(self.current + 1)

  async def __previous_page(self):
    return await self.__show_page(self.current - 1)

  async def __close(self):
    self.running = False
    try:
      if self.delete_after:
        await self.message.delete()
    except:
      pass
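
A minimal sketch of driving ``PaginatorSession`` from a command, assuming a discord.py bot with the ``commands`` extension; the command name and embed contents are made up, and the ``intents`` argument is only needed on discord.py versions that require it:

import discord
from discord.ext import commands

bot = commands.Bot(command_prefix='!', intents=discord.Intents.default())

@bot.command()
async def pages(ctx):
  # One embed per page; the session adds the navigation reactions itself.
  embeds = [discord.Embed(title=f'Page {i + 1}', description='content') for i in range(3)]
  session = PaginatorSession(bot, ctx, timeout=60, pages=embeds, footer='example footer')
  await session.run()
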
Exemple #37
0
def from_xml(cls,
             path,
             sheet=None,
             skip_lines=0,
             header=True,
             read_only=True,
             reset_dimensions=False,
             **kwargs):
    """
    Parse an XLSX file.

    :param path:
        Path to an XLSX file to load or a file-like object for one.
    :param sheet:
        The names or integer indices of the worksheets to load. If not specified
        then the "active" sheet will be used.
    :param skip_lines:
        The number of rows to skip from the top of the sheet.
    :param header:
        If :code:`True`, the first row is assumed to contain column names.
    :param read_only:
        If :code:`True`, open the workbook in openpyxl's read-only mode.
    :param reset_dimensions:
        If :code:`True`, do not trust the dimensions in the file's properties,
        and recalculate them based on the data in the file.
    """
    if not isinstance(skip_lines, int):
        raise ValueError('skip_lines argument must be an int')

    if hasattr(path, 'read'):
        f = path
    else:
        f = open(path, 'rb')

    book = openpyxl.load_workbook(f, read_only=read_only, data_only=True)

    multiple = agate.utils.issequence(sheet)
    if multiple:
        sheets = sheet
    else:
        sheets = [sheet]

    tables = OrderedDict()

    for i, sheet in enumerate(sheets):
        if isinstance(sheet, six.string_types):
            try:
                sheet = book[sheet]
            except KeyError:
                f.close()
                raise
        elif isinstance(sheet, int):
            try:
                sheet = book.worksheets[sheet]
            except IndexError:
                f.close()
                raise
        else:
            sheet = book.active

        column_names = None
        rows = []

        if reset_dimensions:
            sheet.reset_dimensions()

        for i, row in enumerate(sheet.iter_rows(min_row=skip_lines + 1)):
            if i == 0 and header:
                column_names = [
                    None if c.value is None else six.text_type(c.value)
                    for c in row
                ]
                continue

            values = []

            for c in row:
                value = c.value

                if value.__class__ is datetime.datetime:
                    # Handle default XLSX date as 00:00 time
                    if value.date() == datetime.date(
                            1904, 1, 1) and not has_date_elements(c):
                        value = value.time()

                        value = normalize_datetime(value)
                    elif value.time() == NULL_TIME:
                        value = value.date()
                    else:
                        value = normalize_datetime(value)

                values.append(value)

            rows.append(values)

        if 'column_names' in kwargs:
            if not header:
                column_names = kwargs['column_names']
            del kwargs['column_names']

        tables[sheet.title] = agate.Table(rows, column_names, **kwargs)

    f.close()

    if multiple:
        return agate.MappedSequence(tables.values(), tables.keys())
    else:
        return tables.popitem()[1]
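
Despite its name, the loader above reads XLSX workbooks through openpyxl. A minimal usage sketch, assuming agate and openpyxl are installed and using a made-up ``sales.xlsx`` file with a ``2019`` worksheet; ``cls`` is unused by the body, so ``agate.Table`` is passed explicitly here (the original package presumably attaches the function to ``agate.Table`` as a classmethod):

import agate

# Single sheet by name: returns one agate.Table.
table = from_xml(agate.Table, 'sales.xlsx', sheet='2019', skip_lines=1)
print(table.column_names)

# Several sheets at once: returns an agate.MappedSequence keyed by sheet title.
tables = from_xml(agate.Table, 'sales.xlsx', sheet=['2019', 0])
print(len(tables))       # number of sheets loaded
print(tables['2019'])    # sheets are retrievable by title
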
Exemple #38
0
    def aws_prepare_request(self, payload, reference=None):
        """
        Takes the intended payload and returns the headers for it.

        The payload is presumed to have already been urlencoded()

        """

        # Define our AWS header
        headers = {
            'User-Agent': self.app_id,
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',

            # Populated below
            'Content-Length': 0,
            'Authorization': None,
            'X-Amz-Date': None,
        }

        # Get a reference time (used for header construction)
        reference = datetime.utcnow()

        # Provide Content-Length
        headers['Content-Length'] = str(len(payload))

        # Amazon Date Format
        amzdate = reference.strftime('%Y%m%dT%H%M%SZ')
        headers['X-Amz-Date'] = amzdate

        # Credential Scope
        scope = '{date}/{region}/{service}/{request}'.format(
            date=reference.strftime('%Y%m%d'),
            region=self.aws_region_name,
            service=self.aws_service_name,
            request=self.aws_auth_request,
        )

        # Similar to headers; but a subset.  keys must be lowercase
        signed_headers = OrderedDict([
            ('content-type', headers['Content-Type']),
            ('host', '{service}.{region}.amazonaws.com'.format(
                service=self.aws_service_name,
                region=self.aws_region_name)),
            ('x-amz-date', headers['X-Amz-Date']),
        ])

        #
        # Build Canonical Request Object
        #
        canonical_request = '\n'.join([
            # Method
            u'POST',

            # URL
            self.aws_canonical_uri,

            # Query String (none set for POST)
            '',

            # Header Content (must include \n at end!)
            # All entries except characters in amazon date must be
            # lowercase
            '\n'.join(['%s:%s' % (k, v)
                      for k, v in signed_headers.items()]) + '\n',

            # Header Entries (in same order identified above)
            ';'.join(signed_headers.keys()),

            # Payload
            sha256(payload.encode('utf-8')).hexdigest(),
        ])

        # Prepare Unsigned Signature
        to_sign = '\n'.join([
            self.aws_auth_algorithm,
            amzdate,
            scope,
            sha256(canonical_request.encode('utf-8')).hexdigest(),
        ])

        # Our Authorization header
        headers['Authorization'] = ', '.join([
            '{algorithm} Credential={key}/{scope}'.format(
                algorithm=self.aws_auth_algorithm,
                key=self.aws_access_key_id,
                scope=scope,
            ),
            'SignedHeaders={signed_headers}'.format(
                signed_headers=';'.join(signed_headers.keys()),
            ),
            'Signature={signature}'.format(
                signature=self.aws_auth_signature(to_sign, reference)
            ),
        ])

        return headers
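
The ``OrderedDict`` above matters because the canonical header block and the ``SignedHeaders`` value must list the headers in the same (lowercase, sorted) order. A standalone sketch of just that ordering step, with made-up header values:

from collections import OrderedDict

signed_headers = OrderedDict([
    ('content-type', 'application/x-www-form-urlencoded; charset=utf-8'),
    ('host', 'sns.us-east-1.amazonaws.com'),
    ('x-amz-date', '20200101T000000Z'),
])

# Both strings are derived from the same dict, so their ordering can never drift apart.
canonical_headers = '\n'.join('%s:%s' % (k, v) for k, v in signed_headers.items()) + '\n'
signed_header_list = ';'.join(signed_headers.keys())

assert signed_header_list == 'content-type;host;x-amz-date'
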
Exemple #39
0
class SourceCatalogRegistry(object):
    """Source catalog registry.

    Provides simple and efficient access to source catalogs
    by lazy-loading and caching catalog objects.

    You should use these catalogs read-only; if you modify
    them, you can get non-reproducible results when you access
    the modified version later on.
    """
    def __init__(self):
        self._available_catalogs = OrderedDict()
        self._loaded_catalogs = OrderedDict()

    @classmethod
    def builtins(cls):
        """Factory function that make a catalog registry and
        registers the built-in catalogs.
        """
        source_catalogs = cls()

        from .fermi import SourceCatalog3FGL
        source_catalogs.register('3fgl', SourceCatalog3FGL)

        from .fermi import SourceCatalog2FHL
        source_catalogs.register('2fhl', SourceCatalog2FHL)

        import os
        if 'HGPS_ANALYSIS' in os.environ:
            from .hess import SourceCatalogHGPS
            source_catalogs.register('hgps', SourceCatalogHGPS)

        return source_catalogs

    @property
    def catalog_names(self):
        return list(self._available_catalogs.keys())

    def register(self, name, factory, args=()):
        """Register a source catalog.

        It must be possible to load it via ``factory(*args)``.
        """
        data = dict(factory=factory, args=args)
        self._available_catalogs[name] = data

    def __getitem__(self, name):
        if name not in self._available_catalogs:
            msg = 'Unknown catalog: "{}". '.format(name)
            msg += 'Available catalogs: {}'.format(self.catalog_names)
            raise KeyError(msg)

        if name not in self._loaded_catalogs:
            cat = self._available_catalogs[name]
            factory = cat['factory']
            args = cat['args']
            self._loaded_catalogs[name] = factory(*args)

        return self._loaded_catalogs[name]

    def info(self, file=None):
        """Print summary info about catalogs.
        """
        if not file:
            file = sys.stdout

        print('Source catalog registry:', file=file)
        # TODO: how can we print to file?
        self.info_table.pprint()

    @property
    def info_table(self):
        """Summary info table on catalogs.

        Loads all catalogs.
        """
        table = []
        for name in self._available_catalogs.keys():
            cat = self[name]
            data = dict()
            data['Name'] = name
            data['Description'] = cat.description
            data['Sources'] = len(cat.table)
            table.append(data)
        table = Table(rows=table, names=['Name', 'Description', 'Sources'])
        return table
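
A minimal sketch of registering and lazily loading a catalog, using a stand-in catalog class instead of the Fermi/HESS catalogs wired up in ``builtins()``:

class FakeCatalog(object):
    description = 'stand-in catalog'
    table = [1, 2, 3]

registry = SourceCatalogRegistry()
registry.register('fake', FakeCatalog)

print(registry.catalog_names)     # ['fake']
cat = registry['fake']            # the factory runs on first access...
assert registry['fake'] is cat    # ...and the instance is cached afterwards
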
    try:
        myID = int(os.environ["SLURM_ARRAY_TASK_ID"])
        totalIDs = int(os.environ["SLURM_ARRAY_TASK_MAX"])
    except KeyError:
        myID = 1
        totalIDs = 1

    print("Job %s of %s reporting in!" % (myID, totalIDs))

    runPars = OrderedDict([('model', ['ica','pca']),
        # ('model', models.models),
        ('features',  [10, 30, 50]),
        ('held_out_subj', np.arange(10))])

    # cartesian over param settings
    allpar = [dict(parset) for parset in (zip(runPars.keys(), p)
              for p in product(*runPars.values()))]

    pointsPerId = len(allpar) / totalIDs
    start = int((myID-1)*pointsPerId)
    end = int(len(allpar) if myID == totalIDs else (myID)*pointsPerId)
    print("Doing Params %s to %s (inclusive)" % (start, end-1))
    # mypar = allpar[start:end]

    for parnum in range(start, end):
        mypar = allpar[parnum]        
        fname = '%s/results_loo_recon_%s_%ifeatures_subj%i.csv' % (outfile_path, mypar['model'], mypar['features'], mypar['held_out_subj'])
        if Path(fname).exists(): 
            print("Found %s, skipping" % fname)
            continue
        else:
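
The block above spreads a cartesian product of parameter settings across SLURM array tasks. A standalone sketch of the same ``OrderedDict`` plus ``itertools.product`` pattern, without the cluster-specific pieces:

from collections import OrderedDict
from itertools import product

run_pars = OrderedDict([('model', ['ica', 'pca']),
                        ('features', [10, 30]),
                        ('held_out_subj', [0, 1])])

# One dict per combination; key order follows the OrderedDict definition.
all_par = [dict(zip(run_pars.keys(), values)) for values in product(*run_pars.values())]

print(len(all_par))   # 8
print(all_par[0])     # {'model': 'ica', 'features': 10, 'held_out_subj': 0}
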
class ODEFunc(object):
	def __init__(self, SpeciesCount = 12):
		self.SpeciesCount = SpeciesCount
		self.Conc_0 = np.zeros(SpeciesCount, np.float)
		self.CreateTempData()
		self.Params = {}
		
	def SetConc(self, Conc):
		self.Conc_0 = np.array(Conc[:], np.float)

	def CreateTempData(self):
		self.dydat = np.zeros(self.SpeciesCount,np.float)
		self.ydatM = np.zeros((self.SpeciesCount,)*2,np.float)
		
	def Der(self, ydat, Time_Initial):
		ydat = np.array(ydat,np.float)
		self.ydatM = np.outer(ydat,ydat)
		return np.sum(self.Coef1D*ydat,axis=1)+np.sum(np.sum(self.ydatM*self.Coef2D,axis=1),axis=1)+self.Synth
	
	def SetCoef(self, Coef1D, Coef2D):
		self.Coef1D = Coef1D
		self.Coef2D = Coef2D
		self.SpeciesRange = np.arange(len(self.Coef1D))
	
	def GetSDataFromFiles(self, filename="species3"):
		try:
			self.Species = OrderedDict()
			with open(filename,"r") as SpeciesFile:
				SpeciesList = SpeciesFile.read().strip().split("\n")
				for S in SpeciesList:
					L = S.split(";;")
					self.Species[int(L[0])] = L[1]
		
			self.Species = OrderedDict(sorted(self.Species.items(), key=lambda x: x[0]))
			self.SpeciesConvert = OrderedDict()
			for i, S in enumerate(map(int,self.Species.keys())):
				self.SpeciesConvert[int(S)] = i

			self.SpeciesCount = len(self.Species.keys())
			self.CreateTempData()
		except:
			return False

	def GetRDataFromFiles(self, filename = "react3"):
		try:
			self.Reactions = []
			with open(filename,"r") as ReactFile:
				ReactList = ReactFile.read().strip().split("\n")
				ReactList = map(lambda x: x.split("\t"), ReactList)
				for R in ReactList:
					R = map(lambda x: x.split(";;"), R)
					R = map(lambda x: x[0], R)
					R = map(lambda x: x.split(","), R)
					# Drop Representation
					R = list(itertools.chain(*R))
					R = list(map(int, R))  # materialize so the items can be re-assigned below
					R[0]=self.SpeciesConvert[R[0]]
					R[1]=self.SpeciesConvert[R[1]]
					if R[0] > R[1]:
						R0 = R[0]
						R1 = R[1]
						R[0] = R1
						R[1] = R0
					R[3]=self.SpeciesConvert[R[3]]
					self.Reactions.append(R)

				# Sort Reactions by Product, R1, R2 (R1 is always less than R2)
				SortList = [list(row) for row in np.array(self.Reactions)[:, (3, 0, 1)]]
				for i in range(len(SortList)):
					SortList[i].append(i)
				SortOrder = np.array(sorted(SortList))[:, 3]
				self.Reactions = [list(row) for row in np.array(self.Reactions)[SortOrder]]
				
				return True
		except Exception as e:
			print(e)
			return False
	def GenCoef(
						self,
						Kd1,
						Kd2,
						Kp,
						Delta,
						BondCoef = (-9.0/0.6),
						A0 = False,
						Keff_Ident = False
						):
		"""
		If SynDeg is being used make sure to update Conc_0 before updating GenCoef
		"""
		if A0:
			self.SetInitialConc(A0)

		Coef2D = np.zeros((self.SpeciesCount,)*3,np.float)
		Coef1D = np.zeros((self.SpeciesCount,)*2,np.float)
		for R in self.Reactions:
			if Keff_Ident:
				Keff = 1.0
			else:
				Keff = (Kd1**R[4])*(Kd2**R[5])*np.exp((R[4]+R[5]-1)*BondCoef)
#			Forward:
			Coef2D[R[0],R[0],R[1]] -= R[2]
			Coef2D[R[1],R[0],R[1]] -= R[2]
			Coef2D[R[3],R[0],R[1]] += R[2]
#			Reverse:
			Coef1D[R[0],R[3]] += R[6]*Keff
			Coef1D[R[1],R[3]] += R[6]*Keff
			Coef1D[R[3],R[3]] -= R[6]*Keff
			
		#for j in range(self.SpeciesCount):
		#	for i in range(self.SpeciesCount):
		#		Coef2D[j,i,i] = Coef2D[j,i,i]/2.0

		Coef1D = Coef1D*Kp
		Coef2D = Coef2D*Kp

		self.Synth = np.zeros(self.SpeciesCount, np.float)
		self.Synth[0] = self.Conc_0[0]*Delta # Synthesis
		for Species in range(self.SpeciesCount):
			Coef1D[Species,Species] -= Delta # Dilution Effect

		self.Kp = Kp
		self.Kd1 = Kd1
		self.Kd2 = Kd2
		self.SetCoef(Coef1D, Coef2D)
	
	def UpdateKp(self, Kp):
		self.Coef1D = Kp*self.Coef1D/(self.Kp)
		self.Coef2D = Kp*self.Coef2D/(self.Kp)
		self.Kp = Kp
		
	def UpdateKd1(self, Kd1):
		self.GenCoef(Kd1, self.Kd2,self.Kp)
		self.Kd1 = Kd1
		
	def UpdateKd2(self, Kd2):
		self.GenCoef(self.Kd1, Kd2,self.Kp)
		self.Kd2 = Kd2
	
	def SetTimeRange(self, Start, End, N = 1e5, IncludeZero = True, IncludePowersof10 = True):
		# Overkill method for finding what powers of 10 are missing
		Time = np.logspace(Start,End,num=N)

		if (0.0 not in Time) and IncludeZero:
			Time = np.hstack((0.0,Time))

		if IncludePowersof10:
			Add = 10.0**np.arange(Start,End+1)[
				np.array(
					[(10.0**i not in Time) for i in range(Start,End+1)]
					)
				]
			if len(Add) > 0:
				Time = np.sort(np.hstack([Time,Add]))
		self.Time = Time

	def SetInitialConc(self, A0):
			self.Conc_0 = np.zeros(self.SpeciesCount, np.float)
			self.Conc_0[0] = A0

	def RunODEINT(
							self,
							#rtol = 1e10, # Used 1e-8 for 2Static (1e-10 Normally)
							#atol = 1e-10, # Used 1e-20 for 2Static (1e-25 Normally)
							rtol = 1e-16, # Used 1e-8 for 2Static (1e-10 Normally)
							atol = 1e-20, # Used 1e-20 for 2Static (1e-25 Normally)
							ResultsOnly=True,
							ReturnOutMessage=False,
							ReturnAll=False,
							mxstep=int(5e8),
							mxordn=int(5e7),
							mxords=int(5e7),
							hmin = 0,
							hmax = 0,
							):
		self.LastResults = odeint(
												self.Der,
												self.Conc_0,
												self.Time,
												full_output=True,
												rtol=rtol,
												atol=atol,
												mxstep = mxstep,
												mxordn  = mxordn ,
												mxords  = mxords ,
												hmin = hmin,
												hmax = hmax
												)
		if ReturnAll:
			return self.LastResults[0], self.LastResults[1]
		if ReturnOutMessage:
			return self.LastResults[0], self.LastResults[1]["message"]
		if ResultsOnly:
			return self.LastResults[0]
		return self.LastResults
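
``GetSDataFromFiles`` above expects one ``id;;name`` pair per line and rebuilds a dense index map from the (possibly sparse) species ids. A standalone sketch of that parsing step on in-memory lines instead of a file:

from collections import OrderedDict

lines = ['3;;dimer', '1;;monomer', '7;;trimer']

species = OrderedDict()
for line in lines:
    key, name = line.split(';;')
    species[int(key)] = name

species = OrderedDict(sorted(species.items()))                         # sort by species id
species_convert = OrderedDict((s, i) for i, s in enumerate(species))   # id -> dense index

print(list(species.items()))   # [(1, 'monomer'), (3, 'dimer'), (7, 'trimer')]
print(species_convert)         # OrderedDict([(1, 0), (3, 1), (7, 2)])
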
Exemple #42
0
def main(argv):
    # The file we are currently processing, if it is "cmd_line.json" everything will be processed.
    process_file = argv[1]

    if process_file is None:
        logging.debug(
            "No file was received, do not go on processing the other actions. Just leave for now."
        )
        return

    json_type = os.path.basename(process_file).split('.json')[0]

    # The "GLOBAL" Configuration object
    config = CsConfig()

    logging.basicConfig(filename=config.get_logger(),
                        level=config.get_level(),
                        format=config.get_format())

    # Load stored ip addresses from disk to CsConfig()
    config.set_address()

    logging.debug("Configuring ip addresses")
    config.address().compare()
    config.address().process()

    databag_map = OrderedDict([
        ("guest_network", {
            "process_iptables": True,
            "executor": []
        }),
        ("vm_password", {
            "process_iptables": False,
            "executor": [CsPassword("vmpassword", config)]
        }),
        ("vm_metadata", {
            "process_iptables": False,
            "executor": [CsVmMetadata('vmdata', config)]
        }), ("network_acl", {
            "process_iptables": True,
            "executor": []
        }), ("firewall_rules", {
            "process_iptables": True,
            "executor": []
        }), ("forwarding_rules", {
            "process_iptables": True,
            "executor": []
        }), ("staticnat_rules", {
            "process_iptables": True,
            "executor": []
        }), ("site_2_site_vpn", {
            "process_iptables": True,
            "executor": []
        }), ("remote_access_vpn", {
            "process_iptables": True,
            "executor": []
        }),
        ("vpn_user_list", {
            "process_iptables": False,
            "executor": [CsVpnUser("vpnuserlist", config)]
        }),
        ("vm_dhcp_entry", {
            "process_iptables": False,
            "executor": [CsDhcp("dhcpentry", config)]
        }),
        ("dhcp", {
            "process_iptables": False,
            "executor": [CsDhcp("dhcpentry", config)]
        }), ("load_balancer", {
            "process_iptables": True,
            "executor": []
        }),
        ("monitor_service", {
            "process_iptables": False,
            "executor": [CsMonitor("monitorservice", config)]
        }),
        ("static_routes", {
            "process_iptables": False,
            "executor": [CsStaticRoutes("staticroutes", config)]
        })
    ])

    def execDatabag(key, db):
        if key not in db.keys() or 'executor' not in db[key]:
            logging.warn(
                "Unable to find config or executor(s) for the databag type %s"
                % key)
            return
        for executor in db[key]['executor']:
            logging.debug("Processing for databag type: %s" % key)
            executor.process()

    def execIptables(config):
        logging.debug("Processing iptables rules")
        iptables_executor = IpTablesExecutor(config)
        iptables_executor.process()

    if json_type == "cmd_line":
        logging.debug(
            "cmd_line.json changed. All other files will be processed as well."
        )
        for key in databag_map.keys():
            execDatabag(key, databag_map)
        execIptables(config)
    elif json_type in databag_map.keys():
        execDatabag(json_type, databag_map)
        if databag_map[json_type]['process_iptables']:
            execIptables(config)
    else:
        logging.warn(
            "Unable to find and process databag for file: %s, for json type=%s"
            % (process_file, json_type))

    red = CsRedundant(config)
    red.set()
    return 0
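
The ``OrderedDict`` keeps the databags in a fixed processing order while mapping each one to its executors. A stripped-down sketch of the same dispatch pattern, with a made-up ``EchoExecutor`` standing in for the CloudStack executor classes:

from collections import OrderedDict

class EchoExecutor(object):
    def __init__(self, name):
        self.name = name

    def process(self):
        print('processing %s' % self.name)

databag_map = OrderedDict([
    ('vm_password', {'process_iptables': False, 'executor': [EchoExecutor('vmpassword')]}),
    ('firewall_rules', {'process_iptables': True, 'executor': []}),
])

def exec_databag(key, db):
    # Same shape as execDatabag above: look up the databag and run its executors.
    for executor in db.get(key, {}).get('executor', []):
        executor.process()

for key in databag_map:
    exec_databag(key, databag_map)
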
Exemple #43
0
class FieldMetadata(object):
    '''
    key: the key to the dictionary is:
    - for standard fields, the metadata field name.
    - for custom fields, the metadata field name prefixed by '#'
    This is done to create two 'namespaces' so the names don't clash

    label: the actual column label. No prefixing.

    datatype: the type of information in the field. Valid values are listed in
    VALID_DATA_TYPES below.
    is_multiple: valid for the text datatype. If {}, the field is to be
    treated as a single term. If not None, it contains a dict of the form
            {'cache_to_list': ',',
             'ui_to_list': ',',
             'list_to_ui': ', '}
    where the cache_to_list contains the character used to split the value in
    the meta2 table, ui_to_list contains the character used to create a list
    from a value shown in the ui (each resulting value must be strip()ed and
    empty values removed), and list_to_ui contains the string used in join()
    to create a displayable string from the list.

    kind == field: is a db field.
    kind == category: standard tag category that isn't a field. see news.
    kind == user: user-defined tag category.
    kind == search: saved-searches category.

    is_category: is a tag browser category. If true, then:
       table: name of the db table used to construct item list
       column: name of the column in the normalized table to join on
       link_column: name of the column in the connection table to join on. This
                    key should not be present if there is no link table
       category_sort: the field in the normalized table to sort on. This
                      key must be present if is_category is True
       If these are None, then the category constructor must know how
       to build the item list (e.g., formats, news).
       The order below is the order that the categories will
       appear in the tags pane.

    name: the text that is to be used when displaying the field. Column headings
    in the GUI, etc.

    search_terms: the terms that can be used to identify the field when
    searching. They can be thought of as aliases for metadata keys, but are only
    valid when passed to search().

    is_custom: the field has been added by the user.

    rec_index: the index of the field in the db metadata record.

    is_csp: field contains colon-separated pairs. Must also be text, is_multiple

    '''

    VALID_DATA_TYPES = frozenset([
        None, 'rating', 'text', 'comments', 'datetime', 'int', 'float', 'bool',
        'series', 'composite', 'enumeration'
    ])

    # search labels that are not db columns
    search_items = ['all', 'search']
    __calibre_serializable__ = True

    def __init__(self):
        self._field_metadata = _builtin_field_metadata()
        self._tb_cats = OrderedDict()
        self._tb_custom_fields = {}
        self._search_term_map = {}
        self.custom_label_to_key_map = {}
        for k, v in self._field_metadata:
            if v['kind'] == 'field' and v[
                    'datatype'] not in self.VALID_DATA_TYPES:
                raise ValueError('Unknown datatype %s for field %s' %
                                 (v['datatype'], k))
            self._tb_cats[k] = v
            self._tb_cats[k]['label'] = k
            self._tb_cats[k]['display'] = {}
            self._tb_cats[k]['is_editable'] = True
            self._add_search_terms_to_map(k, v['search_terms'])
        self._tb_cats['timestamp']['display'] = {
            'date_format': tweaks['gui_timestamp_display_format']
        }
        self._tb_cats['pubdate']['display'] = {
            'date_format': tweaks['gui_pubdate_display_format']
        }
        self._tb_cats['last_modified']['display'] = {
            'date_format': tweaks['gui_last_modified_display_format']
        }
        self.custom_field_prefix = '#'
        self.get = self._tb_cats.get

    def __getitem__(self, key):
        if key == 'title_sort':
            return self._tb_cats['sort']
        return self._tb_cats[key]

    def __setitem__(self, key, val):
        raise AttributeError('Assigning to this object is forbidden')

    def __delitem__(self, key):
        del self._tb_cats[key]

    def __iter__(self):
        for key in self._tb_cats:
            yield key

    def __contains__(self, key):
        return key in self._tb_cats or key == 'title_sort'

    def has_key(self, key):
        return key in self

    def keys(self):
        return self._tb_cats.keys()

    def __eq__(self, other):
        if not isinstance(other, FieldMetadata):
            return False
        for attr in ('_tb_custom_fields', '_search_term_map',
                     'custom_label_to_key_map', 'custom_field_prefix'):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return dict(self._tb_cats) == dict(other._tb_cats)

    def __ne__(self, other):
        return not self.__eq__(other)

    def sortable_field_keys(self):
        return [
            k for k in self._tb_cats.keys()
            if self._tb_cats[k]['kind'] == 'field'
            and self._tb_cats[k]['datatype'] is not None
        ]

    def ui_sortable_field_keys(self):
        ans = {
            k: self._tb_cats[k]['name']
            for k in set(self.sortable_field_keys()) - {
                'sort',
                'author_sort',
                'au_map',
                'series_sort',
                'marked',
                'series_index',
                'path',
                'formats',
                'identifiers',
                'uuid',
                'comments',
            } if self._tb_cats[k]['name']
        }
        ans['cover'] = _('Has cover')
        return ans

    def displayable_field_keys(self):
        return [
            k for k in self._tb_cats.keys()
            if self._tb_cats[k]['kind'] == 'field'
            and self._tb_cats[k]['datatype'] is not None and k not in (
                'au_map', 'marked', 'ondevice', 'cover',
                'series_sort') and not self.is_series_index(k)
        ]

    def standard_field_keys(self):
        return [
            k for k in self._tb_cats.keys()
            if self._tb_cats[k]['kind'] == 'field'
            and not self._tb_cats[k]['is_custom']
        ]

    def custom_field_keys(self, include_composites=True):
        res = []
        for k in self._tb_cats.keys():
            fm = self._tb_cats[k]
            if fm['kind']=='field' and fm['is_custom'] and \
                   (fm['datatype'] != 'composite' or include_composites):
                res.append(k)
        return res

    def all_field_keys(self):
        return [
            k for k in self._tb_cats.keys()
            if self._tb_cats[k]['kind'] == 'field'
        ]

    def iterkeys(self):
        for key in self._tb_cats:
            yield key

    def itervalues(self):
        return iter(self._tb_cats.values())

    def values(self):
        return self._tb_cats.values()

    def iteritems(self):
        for key in self._tb_cats:
            yield (key, self._tb_cats[key])

    def custom_iteritems(self):
        for key, meta in self._tb_custom_fields.items():
            yield (key, meta)

    def items(self):
        return list(self.iteritems())

    def is_custom_field(self, key):
        return key.startswith(self.custom_field_prefix)

    def is_ignorable_field(self, key):
        'Custom fields and user categories are ignorable'
        return self.is_custom_field(key) or key.startswith('@')

    def ignorable_field_keys(self):
        return [
            k for k in self._tb_cats.keys() if self.is_ignorable_field(k)
        ]

    def is_series_index(self, key):
        try:
            m = self._tb_cats[key]
            return (m['datatype'] == 'float' and key.endswith('_index')
                    and key[:-6] in self._tb_cats)
        except (KeyError, ValueError, TypeError, AttributeError):
            return False

    def key_to_label(self, key):
        if 'label' not in self._tb_cats[key]:
            return key
        return self._tb_cats[key]['label']

    def label_to_key(self, label, prefer_custom=False):
        if prefer_custom:
            if label in self.custom_label_to_key_map:
                return self.custom_label_to_key_map[label]
        if label in self._tb_cats:
            return label
        if not prefer_custom:
            if label in self.custom_label_to_key_map:
                return self.custom_label_to_key_map[label]
        raise ValueError('Unknown key [%s]' % (label))

    def all_metadata(self):
        l = {}
        for k in self._tb_cats:
            l[k] = self._tb_cats[k]
        return l

    def custom_field_metadata(self, include_composites=True):
        if include_composites:
            return self._tb_custom_fields
        l = {}
        for k in self.custom_field_keys(include_composites):
            l[k] = self._tb_cats[k]
        return l

    def add_custom_field(self,
                         label,
                         table,
                         column,
                         datatype,
                         colnum,
                         name,
                         display,
                         is_editable,
                         is_multiple,
                         is_category,
                         is_csp=False):
        key = self.custom_field_prefix + label
        if key in self._tb_cats:
            raise ValueError('Duplicate custom field [%s]' % (label))
        if datatype not in self.VALID_DATA_TYPES:
            raise ValueError('Unknown datatype %s for field %s' %
                             (datatype, key))
        self._tb_cats[key] = {
            'table': table,
            'column': column,
            'datatype': datatype,
            'is_multiple': is_multiple,
            'kind': 'field',
            'name': name,
            'search_terms': [key],
            'label': label,
            'colnum': colnum,
            'display': display,
            'is_custom': True,
            'is_category': is_category,
            'link_column': 'value',
            'category_sort': 'value',
            'is_csp': is_csp,
            'is_editable': is_editable,
        }
        self._tb_custom_fields[key] = self._tb_cats[key]
        self._add_search_terms_to_map(key, [key])
        self.custom_label_to_key_map[label] = key
        if datatype == 'series':
            key += '_index'
            self._tb_cats[key] = {
                'table': None,
                'column': None,
                'datatype': 'float',
                'is_multiple': {},
                'kind': 'field',
                'name': '',
                'search_terms': [key],
                'label': label + '_index',
                'colnum': None,
                'display': {},
                'is_custom': False,
                'is_category': False,
                'link_column': None,
                'category_sort': None,
                'is_editable': False,
                'is_csp': False
            }
            self._add_search_terms_to_map(key, [key])
            self.custom_label_to_key_map[label + '_index'] = key

    def remove_dynamic_categories(self):
        for key in list(self._tb_cats.keys()):
            val = self._tb_cats[key]
            if val['is_category'] and val['kind'] in ('user', 'search'):
                for k in self._tb_cats[key]['search_terms']:
                    if k in self._search_term_map:
                        del self._search_term_map[k]
                del self._tb_cats[key]

    def remove_user_categories(self):
        for key in list(self._tb_cats.keys()):
            val = self._tb_cats[key]
            if val['is_category'] and val['kind'] == 'user':
                for k in self._tb_cats[key]['search_terms']:
                    if k in self._search_term_map:
                        del self._search_term_map[k]
                del self._tb_cats[key]

    def _remove_grouped_search_terms(self):
        to_remove = [
            v for v in self._search_term_map
            if isinstance(self._search_term_map[v], list)
        ]
        for v in to_remove:
            del self._search_term_map[v]

    def add_grouped_search_terms(self, gst):
        self._remove_grouped_search_terms()
        for t in gst:
            try:
                self._add_search_terms_to_map(gst[t], [t])
            except ValueError:
                traceback.print_exc()

    def cc_series_index_column_for(self, key):
        return self._tb_cats[key]['rec_index'] + 1

    def add_user_category(self, label, name):
        if label in self._tb_cats:
            raise ValueError('Duplicate user field [%s]' % (label))
        st = [label]
        if icu_lower(label) != label:
            st.append(icu_lower(label))
        self._tb_cats[label] = {
            'table': None,
            'column': None,
            'datatype': None,
            'is_multiple': {},
            'kind': 'user',
            'name': name,
            'search_terms': st,
            'is_custom': False,
            'is_category': True,
            'is_csp': False
        }
        self._add_search_terms_to_map(label, st)

    def add_search_category(self, label, name):
        if label in self._tb_cats:
            raise ValueError('Duplicate user field [%s]' % (label))
        self._tb_cats[label] = {
            'table': None,
            'column': None,
            'datatype': None,
            'is_multiple': {},
            'kind': 'search',
            'name': name,
            'search_terms': [],
            'is_custom': False,
            'is_category': True,
            'is_csp': False
        }

    def set_field_record_index(self, label, index, prefer_custom=False):
        if prefer_custom:
            key = self.custom_field_prefix + label
            if key not in self._tb_cats:
                key = label
        else:
            if label in self._tb_cats:
                key = label
            else:
                key = self.custom_field_prefix + label
        self._tb_cats[key]['rec_index'] = index  # let the exception fly ...

    def get_search_terms(self):
        s_keys = sorted(self._search_term_map.keys())
        for v in self.search_items:
            s_keys.append(v)
        return s_keys

    def _add_search_terms_to_map(self, key, terms):
        if terms is not None:
            for t in terms:
                if t in self._search_term_map:
                    raise ValueError(
                        'Attempt to add duplicate search term "%s"' % t)
                self._search_term_map[t] = key

    def search_term_to_field_key(self, term):
        return self._search_term_map.get(term, term)

    def searchable_fields(self):
        return [
            k for k in self._tb_cats.keys()
            if self._tb_cats[k]['kind'] == 'field'
            and len(self._tb_cats[k]['search_terms']) > 0
        ]
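
A minimal, self-contained sketch of the is_multiple convention documented in the class docstring above; the separator dict and sample values are illustrative, not taken from a live FieldMetadata instance:

# Illustrative only: the dict follows the documented shape
# {'cache_to_list', 'ui_to_list', 'list_to_ui'}.
is_multiple = {'cache_to_list': ',', 'ui_to_list': ',', 'list_to_ui': ', '}

cached_value = 'fantasy,science fiction, adventure'
# cache_to_list: character used to split the value stored in the meta2 table
tags = [t.strip() for t in cached_value.split(is_multiple['cache_to_list']) if t.strip()]

# ui_to_list: character used to split a value typed in the UI
user_input = 'horror , gothic'
new_tags = [t.strip() for t in user_input.split(is_multiple['ui_to_list']) if t.strip()]

# list_to_ui: string used to join a list back into a displayable value
print(is_multiple['list_to_ui'].join(tags))      # fantasy, science fiction, adventure
print(is_multiple['list_to_ui'].join(new_tags))  # horror, gothic
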
Exemple #44
0
class ServiceAreaFromPoint(QgisAlgorithm):

    INPUT = 'INPUT'
    START_POINT = 'START_POINT'
    STRATEGY = 'STRATEGY'
    TRAVEL_COST = 'TRAVEL_COST'
    DIRECTION_FIELD = 'DIRECTION_FIELD'
    VALUE_FORWARD = 'VALUE_FORWARD'
    VALUE_BACKWARD = 'VALUE_BACKWARD'
    VALUE_BOTH = 'VALUE_BOTH'
    DEFAULT_DIRECTION = 'DEFAULT_DIRECTION'
    SPEED_FIELD = 'SPEED_FIELD'
    DEFAULT_SPEED = 'DEFAULT_SPEED'
    TOLERANCE = 'TOLERANCE'
    INCLUDE_BOUNDS = 'INCLUDE_BOUNDS'
    OUTPUT = 'OUTPUT'
    OUTPUT_LINES = 'OUTPUT_LINES'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'networkanalysis.svg'))

    def group(self):
        return self.tr('Network analysis')

    def groupId(self):
        return 'networkanalysis'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        self.DIRECTIONS = OrderedDict([
            (self.tr('Forward direction'),
             QgsVectorLayerDirector.DirectionForward),
            (self.tr('Backward direction'),
             QgsVectorLayerDirector.DirectionBackward),
            (self.tr('Both directions'), QgsVectorLayerDirector.DirectionBoth)
        ])

        self.STRATEGIES = [self.tr('Shortest'), self.tr('Fastest')]

        self.addParameter(
            QgsProcessingParameterFeatureSource(
                self.INPUT, self.tr('Vector layer representing network'),
                [QgsProcessing.TypeVectorLine]))
        self.addParameter(
            QgsProcessingParameterPoint(self.START_POINT,
                                        self.tr('Start point')))
        self.addParameter(
            QgsProcessingParameterEnum(self.STRATEGY,
                                       self.tr('Path type to calculate'),
                                       self.STRATEGIES,
                                       defaultValue=0))
        self.addParameter(
            QgsProcessingParameterNumber(
                self.TRAVEL_COST,
                self.tr(
                    'Travel cost (distance for "Shortest", time for "Fastest")'
                ), QgsProcessingParameterNumber.Double, 0.0, False, 0))

        params = []
        params.append(
            QgsProcessingParameterField(self.DIRECTION_FIELD,
                                        self.tr('Direction field'),
                                        None,
                                        self.INPUT,
                                        optional=True))
        params.append(
            QgsProcessingParameterString(
                self.VALUE_FORWARD,
                self.tr('Value for forward direction'),
                optional=True))
        params.append(
            QgsProcessingParameterString(
                self.VALUE_BACKWARD,
                self.tr('Value for backward direction'),
                optional=True))
        params.append(
            QgsProcessingParameterString(self.VALUE_BOTH,
                                         self.tr('Value for both directions'),
                                         optional=True))
        params.append(
            QgsProcessingParameterEnum(self.DEFAULT_DIRECTION,
                                       self.tr('Default direction'),
                                       list(self.DIRECTIONS.keys()),
                                       defaultValue=2))
        params.append(
            QgsProcessingParameterField(self.SPEED_FIELD,
                                        self.tr('Speed field'),
                                        None,
                                        self.INPUT,
                                        optional=True))
        params.append(
            QgsProcessingParameterNumber(self.DEFAULT_SPEED,
                                         self.tr('Default speed (km/h)'),
                                         QgsProcessingParameterNumber.Double,
                                         5.0, False, 0))
        params.append(
            QgsProcessingParameterDistance(self.TOLERANCE,
                                           self.tr('Topology tolerance'), 0.0,
                                           self.INPUT, False, 0))
        params.append(
            QgsProcessingParameterBoolean(
                self.INCLUDE_BOUNDS,
                self.tr('Include upper/lower bound points'),
                defaultValue=False))

        for p in params:
            p.setFlags(p.flags()
                       | QgsProcessingParameterDefinition.FlagAdvanced)
            self.addParameter(p)

        lines_output = QgsProcessingParameterFeatureSink(
            self.OUTPUT_LINES,
            self.tr('Service area (lines)'),
            QgsProcessing.TypeVectorLine,
            optional=True)
        lines_output.setCreateByDefault(True)
        self.addParameter(lines_output)

        nodes_output = QgsProcessingParameterFeatureSink(
            self.OUTPUT,
            self.tr('Service area (boundary nodes)'),
            QgsProcessing.TypeVectorPoint,
            optional=True)
        nodes_output.setCreateByDefault(False)
        self.addParameter(nodes_output)

    def name(self):
        return 'serviceareafrompoint'

    def displayName(self):
        return self.tr('Service area (from point)')

    def processAlgorithm(self, parameters, context, feedback):
        network = self.parameterAsSource(parameters, self.INPUT, context)
        if network is None:
            raise QgsProcessingException(
                self.invalidSourceError(parameters, self.INPUT))

        startPoint = self.parameterAsPoint(parameters, self.START_POINT,
                                           context, network.sourceCrs())
        strategy = self.parameterAsEnum(parameters, self.STRATEGY, context)
        travelCost = self.parameterAsDouble(parameters, self.TRAVEL_COST,
                                            context)

        directionFieldName = self.parameterAsString(parameters,
                                                    self.DIRECTION_FIELD,
                                                    context)
        forwardValue = self.parameterAsString(parameters, self.VALUE_FORWARD,
                                              context)
        backwardValue = self.parameterAsString(parameters, self.VALUE_BACKWARD,
                                               context)
        bothValue = self.parameterAsString(parameters, self.VALUE_BOTH,
                                           context)
        defaultDirection = self.parameterAsEnum(parameters,
                                                self.DEFAULT_DIRECTION,
                                                context)
        speedFieldName = self.parameterAsString(parameters, self.SPEED_FIELD,
                                                context)
        defaultSpeed = self.parameterAsDouble(parameters, self.DEFAULT_SPEED,
                                              context)
        tolerance = self.parameterAsDouble(parameters, self.TOLERANCE, context)

        include_bounds = True  # default to true to maintain 3.0 API
        if self.INCLUDE_BOUNDS in parameters:
            include_bounds = self.parameterAsBool(parameters,
                                                  self.INCLUDE_BOUNDS, context)

        directionField = -1
        if directionFieldName:
            directionField = network.fields().lookupField(directionFieldName)
        speedField = -1
        if speedFieldName:
            speedField = network.fields().lookupField(speedFieldName)

        director = QgsVectorLayerDirector(network, directionField,
                                          forwardValue, backwardValue,
                                          bothValue, defaultDirection)

        distUnit = context.project().crs().mapUnits()
        multiplier = QgsUnitTypes.fromUnitToUnitFactor(
            distUnit, QgsUnitTypes.DistanceMeters)
        if strategy == 0:
            strategy = QgsNetworkDistanceStrategy()
        else:
            strategy = QgsNetworkSpeedStrategy(speedField, defaultSpeed,
                                               multiplier * 1000.0 / 3600.0)

        director.addStrategy(strategy)
        builder = QgsGraphBuilder(network.sourceCrs(), True, tolerance)
        feedback.pushInfo(
            QCoreApplication.translate('ServiceAreaFromPoint',
                                       'Building graph…'))
        snappedPoints = director.makeGraph(builder, [startPoint], feedback)

        feedback.pushInfo(
            QCoreApplication.translate('ServiceAreaFromPoint',
                                       'Calculating service area…'))
        graph = builder.graph()
        idxStart = graph.findVertex(snappedPoints[0])

        tree, cost = QgsGraphAnalyzer.dijkstra(graph, idxStart, 0)
        vertices = set()
        points = []
        lines = []

        for vertex, start_vertex_cost in enumerate(cost):
            inbound_edge_index = tree[vertex]
            if inbound_edge_index == -1 and vertex != idxStart:
                # unreachable vertex
                continue

            if start_vertex_cost > travelCost:
                # vertex is too expensive, discard
                continue

            vertices.add(vertex)
            start_point = graph.vertex(vertex).point()

            # find all edges coming from this vertex
            for edge_id in graph.vertex(vertex).outgoingEdges():
                edge = graph.edge(edge_id)
                end_vertex_cost = start_vertex_cost + edge.cost(0)
                end_point = graph.vertex(edge.toVertex()).point()
                if end_vertex_cost <= travelCost:
                    # end vertex is cheap enough to include
                    vertices.add(edge.toVertex())
                    lines.append([start_point, end_point])
                else:
                    # travelCost sits somewhere on this edge, interpolate position
                    interpolated_end_point = QgsGeometryUtils.interpolatePointOnLineByValue(
                        start_point.x(), start_point.y(), start_vertex_cost,
                        end_point.x(), end_point.y(), end_vertex_cost,
                        travelCost)
                    points.append(interpolated_end_point)
                    lines.append([start_point, interpolated_end_point])

        for i in vertices:
            points.append(graph.vertex(i).point())

        feedback.pushInfo(
            QCoreApplication.translate('ServiceAreaFromPoint',
                                       'Writing results…'))

        fields = QgsFields()
        fields.append(QgsField('type', QVariant.String, '', 254, 0))
        fields.append(QgsField('start', QVariant.String, '', 254, 0))

        feat = QgsFeature()
        feat.setFields(fields)

        (point_sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT,
                                                     context, fields,
                                                     QgsWkbTypes.MultiPoint,
                                                     network.sourceCrs())

        results = {}

        if point_sink is not None:
            results[self.OUTPUT] = dest_id
            geomPoints = QgsGeometry.fromMultiPointXY(points)
            feat.setGeometry(geomPoints)
            feat['type'] = 'within'
            feat['start'] = startPoint.toString()
            point_sink.addFeature(feat, QgsFeatureSink.FastInsert)

            if include_bounds:
                upperBoundary = []
                lowerBoundary = []

                vertices = []
                for i, v in enumerate(cost):
                    if v > travelCost and tree[i] != -1:
                        vertexId = graph.edge(tree[i]).fromVertex()
                        if cost[vertexId] <= travelCost:
                            vertices.append(i)

                for i in vertices:
                    upperBoundary.append(
                        graph.vertex(graph.edge(tree[i]).toVertex()).point())
                    lowerBoundary.append(
                        graph.vertex(graph.edge(tree[i]).fromVertex()).point())

                geomUpper = QgsGeometry.fromMultiPointXY(upperBoundary)
                geomLower = QgsGeometry.fromMultiPointXY(lowerBoundary)

                feat.setGeometry(geomUpper)
                feat['type'] = 'upper'
                feat['start'] = startPoint.toString()
                point_sink.addFeature(feat, QgsFeatureSink.FastInsert)

                feat.setGeometry(geomLower)
                feat['type'] = 'lower'
                feat['start'] = startPoint.toString()
                point_sink.addFeature(feat, QgsFeatureSink.FastInsert)

        (line_sink,
         line_dest_id) = self.parameterAsSink(parameters, self.OUTPUT_LINES,
                                              context, fields,
                                              QgsWkbTypes.MultiLineString,
                                              network.sourceCrs())
        if line_sink is not None:
            results[self.OUTPUT_LINES] = line_dest_id
            geom_lines = QgsGeometry.fromMultiPolylineXY(lines)
            feat.setGeometry(geom_lines)
            feat['type'] = 'lines'
            feat['start'] = startPoint.toString()
            line_sink.addFeature(feat, QgsFeatureSink.FastInsert)

        return results
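
For context, an algorithm like the one above is normally run through the Processing framework rather than instantiated directly. A hedged sketch of such a call from the QGIS Python console; the provider prefix ('qgis:'), layer path, and coordinates are assumptions, not taken from the class itself:

# Assumed to run inside the QGIS Python console with a project open;
# the input path, CRS, and provider prefix are illustrative.
import processing

params = {
    'INPUT': '/data/roads.shp',                     # line layer representing the network
    'START_POINT': '1820000,5430000 [EPSG:2193]',   # "x,y [crs]" point string
    'STRATEGY': 0,                                  # 0 = Shortest, 1 = Fastest
    'TRAVEL_COST': 500.0,                           # distance for "Shortest"
    'DEFAULT_DIRECTION': 2,                         # both directions
    'DEFAULT_SPEED': 5.0,
    'TOLERANCE': 0.0,
    'INCLUDE_BOUNDS': False,
    'OUTPUT': 'memory:service_area_nodes',
    'OUTPUT_LINES': 'memory:service_area_lines',
}
result = processing.run('qgis:serviceareafrompoint', params)
print(result['OUTPUT_LINES'])
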
Exemple #45
0
class CentroidTracker:
	def __init__(self, maxDisappeared=50, maxDistance=50):
		self.nextObjectID = 0
		self.objects = OrderedDict()
		self.disappeared = OrderedDict()
		self.maxDisappeared = maxDisappeared
		self.maxDistance = maxDistance

	def register(self, centroid):
		self.objects[self.nextObjectID] = centroid
		self.disappeared[self.nextObjectID] = 0
		self.nextObjectID += 1

	def deregister(self, objectID):
		del self.objects[objectID]
		del self.disappeared[objectID]

	def update(self, rects):
		if len(rects) == 0:
			for objectID in list(self.disappeared.keys()):
				self.disappeared[objectID] += 1
				if self.disappeared[objectID] > self.maxDisappeared:
					self.deregister(objectID)
			return self.objects
		inputCentroids = np.zeros((len(rects), 2), dtype="int")
		for (i, (startX, startY, endX, endY)) in enumerate(rects):
			cX = int((startX + endX) / 2.0)
			cY = int((startY + endY) / 2.0)
			inputCentroids[i] = (cX, cY)
		if len(self.objects) == 0:
			for i in range(0, len(inputCentroids)):
				self.register(inputCentroids[i])
		else:
			objectIDs = list(self.objects.keys())
			objectCentroids = list(self.objects.values())
			D = dist.cdist(np.array(objectCentroids), inputCentroids)
			rows = D.min(axis=1).argsort()
			cols = D.argmin(axis=1)[rows]
			usedRows = set()
			usedCols = set()
			for (row, col) in zip(rows, cols):
				if row in usedRows or col in usedCols:
					continue
				if D[row, col] > self.maxDistance:
					continue
				objectID = objectIDs[row]
				self.objects[objectID] = inputCentroids[col]
				self.disappeared[objectID] = 0
				usedRows.add(row)
				usedCols.add(col)
			unusedRows = set(range(0, D.shape[0])).difference(usedRows)
			unusedCols = set(range(0, D.shape[1])).difference(usedCols)
			if D.shape[0] >= D.shape[1]:
				for row in unusedRows:
					objectID = objectIDs[row]
					self.disappeared[objectID] += 1
					if self.disappeared[objectID] > self.maxDisappeared:
						self.deregister(objectID)
			else:
				for col in unusedCols:
					self.register(inputCentroids[col])
		return self.objects
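
A minimal usage sketch for the tracker above, assuming the imports the class relies on (numpy as np, scipy.spatial.distance as dist); the bounding boxes are made-up values:

import numpy as np
from scipy.spatial import distance as dist  # the class refers to this module as `dist`

tracker = CentroidTracker(maxDisappeared=5, maxDistance=75)

# Frame 1: two detections given as (startX, startY, endX, endY) boxes
objects = tracker.update([(10, 10, 50, 50), (200, 200, 240, 240)])
print(objects)  # OrderedDict([(0, array([30, 30])), (1, array([220, 220]))])

# Frame 2: both objects moved slightly; IDs are kept by nearest-centroid matching
objects = tracker.update([(14, 12, 54, 52), (205, 203, 245, 243)])
print(objects)

# Frame 3: no detections; disappearance counters start ticking
objects = tracker.update([])
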
Exemple #46
0
class RoutingBin(object):
    """
    List of active nodes up to K size.
    Designated as 'k-buckets' in Kademlia literature.
    Intended for use as the leaves of a RoutingZones tree, it acts as
    an LRU cache for known nodes, with a preference to keep nodes
    that have been active for the longest duration.
    """
    def __init__(self, maxsize=K):
        self.maxsize = maxsize
        self.nodes = OrderedDict()
        self.replacements = OrderedDict()

    def get_by_id(self, node_id):
        """
        Return the node corresponding to the supplied id.
        @param node_id: Id of node to lookup.
        @return Node with node_id or None if not found.
        """
        return self.nodes.get(node_id)

    def get_by_address(self, address, port):
        """
        Return the node corresponding to the supplied address details.
        @param address: IP Address of node to lookup.
        @param port: Port number of node to lookup.
        @return Node with specified details or None if not found.
        """
        for x in self.nodes.values():
            if x.address == address and x.port == port:
                return x
        return None

    def get_all(self):
        """
        Return all nodes in this routing bin.
        @return List of nodes.
        """
        return self.nodes.values()

    def get_node_ids(self):
        """
        Return Ids of all nodes in this routing bin.
        @return List of node ids.
        """
        return self.nodes.keys()

    def push(self, node):
        """
        Adds the supplied node into the routing bin.
        If the bin is full it will overflow into the
        replacement cache.
        @param node: Node to be added.
        """
        node_id = node.node_id
        if self.remaining():
            self.nodes[node_id] = node
        else:
            # add to replacement cache
            # ensure pushed to end as most recent
            if node_id in self.replacements:
                self.replacements.pop(node_id)
            self.replacements[node_id] = node
            # trim if needed, discarding the oldest replacement
            if len(self.replacements) > self.maxsize:
                self.replacements.popitem(last=False)

    def get_oldest(self):
        """
        Returns the node that hasn't had activity for the 
        longest duration.
        @return: Oldest node in the active list.
        """
        return next(iter(self.nodes.values()))

    def pop(self, node_id):
        """
        Removes the specified node from the routing bin.
        The node's place may be taken by another waiting
        in the replacement cache.
        @param node_id: Node to remove.
        @return: The node that was removed from the bin.
        """
        if node_id not in self.nodes:
            return None
        # promote a replacement node if available
        if self.replacements:
            repl_id, repl = self.replacements.popitem(last=True)
            self.nodes[repl_id] = repl
        return self.nodes.pop(node_id)

    def get_closest_to(self, target, max_nodes=1):
        """
        Return the node/s whose distance is the closest to the 
        supplied target id.
        @param target: Target Id for distance
        @param max_nodes: Maximum number of nodes to return.
        @return: A list of closest nodes with len() <= max_nodes
        """
        nodes = sorted(self.get_all(),
                       key=lambda x: distance(x.node_id, target))
        nodes = nodes[:max_nodes]
        return nodes

    def remaining(self):
        """
        @return: The remaining space for active nodes in this bin.
        """
        return self.maxsize - len(self)

    def update(self, node_id):
        """
        Updates the specified node as having recent activity.
        @param node_id: Id of the node to move in list.
        """
        node = self.nodes.pop(node_id)
        self.nodes[node_id] = node

    def __len__(self):
        """
        @return: The number of active nodes in this bin.
        """
        return len(self.nodes)
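
get_closest_to() above depends on a module-level distance() helper that is not shown here; in Kademlia this is the XOR metric over node ids. A minimal sketch of that assumption:

# Assumed helper: Kademlia's XOR distance between two integer node ids.
# The real module may operate on byte strings instead; this is illustrative.
def distance(node_id_a, node_id_b):
    return node_id_a ^ node_id_b

# Nodes that share a longer common id prefix are closer in XOR space.
print(distance(0b1010, 0b1000))  # 2
print(distance(0b1010, 0b0010))  # 8
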
Exemple #47
0
class CopyMapping(object):
    """
    Maps comma-delimited file to Django model and loads it into PostgreSQL database using COPY command.
    """
    def __init__(self,
                 model,
                 csv_path_or_obj,
                 mapping,
                 using=None,
                 delimiter=',',
                 quote_character=None,
                 null=None,
                 force_not_null=None,
                 force_null=None,
                 encoding=None,
                 ignore_conflicts=False,
                 static_mapping=None,
                 on_conflict=[],
                 static_mapping_on_conflict=None,
                 temp_table_name_suffix=""):
        # Set the required arguments
        self.model = model
        self.csv_path_or_obj = csv_path_or_obj

        # If the CSV is not a file object already ...
        if hasattr(csv_path_or_obj, 'read'):
            self.csv_file = csv_path_or_obj
        else:
            # ... verify the path exists ...
            if not os.path.exists(self.csv_path_or_obj):
                raise ValueError("CSV path does not exist")
            # ... then open it up.
            self.csv_file = open(self.csv_path_or_obj, 'r')

        # Hook in the other optional settings
        self.quote_character = quote_character
        self.delimiter = delimiter
        self.null = null
        self.force_not_null = force_not_null
        self.force_null = force_null
        self.encoding = encoding
        self.supports_ignore_conflicts = True
        self.ignore_conflicts = ignore_conflicts
        if static_mapping is not None:
            self.static_mapping = OrderedDict(static_mapping)
        else:
            self.static_mapping = {}

        # Line up the database connection
        if using is not None:
            self.using = using
        else:
            self.using = router.db_for_write(model)
        self.conn = connections[self.using]
        self.backend = self.conn.ops

        # Verify it is PostgreSQL
        if self.conn.vendor != 'postgresql':
            raise TypeError("Only PostgreSQL backends supported")

        # Check if it is PSQL 9.5 or greater, which determines if ignore_conflicts is supported
        self.supports_ignore_conflicts = self.is_postgresql_9_5()
        if self.ignore_conflicts and not self.supports_ignore_conflicts:
            raise NotSupportedError(
                'This database backend does not support ignoring conflicts.')

        # Pull the CSV headers
        self.headers = self.get_headers()

        # Map them to the model
        self.mapping = self.get_mapping(mapping)

        # Make sure everything is legit
        self.validate_mapping()

        # Configure the name of our temporary table to COPY into
        self.temp_table_name = "temp_%s" % (self.model._meta.db_table + "_" +
                                            temp_table_name_suffix)

        self.on_conflict = on_conflict
        self.static_mapping_on_conflict = static_mapping_on_conflict

    def save(self, silent=False, stream=sys.stdout):
        """
        Saves the contents of the CSV file to the database.

        Override this method and use `self.create(cursor)`,
        `self.copy(cursor)`, `self.insert(cursor)`, and `self.drop(cursor)`
        if you need functionality other than the default create/copy/insert/drop
        workflow.

         silent:
           By default, non-fatal error notifications are printed to stdout,
           but this keyword may be set to disable these notifications.

         stream:
           Status information will be written to this file handle. Defaults to
           using `sys.stdout`, but any object with a `write` method is
           supported.
        """
        logger.debug("Loading CSV to {}".format(self.model.__name__))
        if not silent:
            stream.write("Loading CSV to {}\n".format(self.model.__name__))

        # Connect to the database
        with self.conn.cursor() as c:
            self.create(c)
            self.copy(c)
            insert_count = self.insert(c)
            self.drop(c)

        if not silent:
            stream.write("{} records loaded\n".format(intcomma(insert_count)))

        return insert_count

    def is_postgresql_9_5(self):
        return self.conn.pg_version >= 90500

    def get_field(self, name):
        """
        Returns any fields on the database model matching the provided name.
        """
        try:
            return self.model._meta.get_field(name)
        except FieldDoesNotExist:
            return None

    def get_mapping(self, mapping):
        """
        Returns a generated mapping based on the CSV header
        """
        if mapping:
            return OrderedDict(mapping)
        return {name: name for name in self.headers}

    def get_headers(self):
        """
        Returns the column headers from the csv as a list.
        """
        logger.debug("Retrieving headers from {}".format(self.csv_file))
        # Open it as a CSV
        csv_reader = csv.reader(self.csv_file, delimiter=self.delimiter)
        # Pop the headers
        headers = next(csv_reader)
        # Move back to the top of the file
        self.csv_file.seek(0)
        # Return the headers
        return headers

    def validate_mapping(self):
        """
        Verify that the mapping provided by the user is acceptable.

        Raises errors if something goes wrong. Returns nothing if everything is kosher.
        """
        # Make sure all of the CSV headers in the mapping actually exist
        for map_header in self.mapping.values():
            if map_header not in self.headers:
                raise ValueError(
                    "Header '{}' not found in CSV file".format(map_header))

        # Make sure all the model fields in the mapping actually exist
        for map_field in self.mapping.keys():
            if not self.get_field(map_field):
                raise FieldDoesNotExist(
                    "Model does not include {} field".format(map_field))

        # Make sure any static mapping columns exist
        for static_field in self.static_mapping.keys():
            if not self.get_field(static_field):
                raise ValueError(
                    "Model does not include {} field".format(static_field))

    #
    # CREATE commands
    #

    def prep_create(self):
        """
        Creates a CREATE statement that makes a new temporary table.

        Returns SQL that can be run.
        """
        sql = """CREATE TEMPORARY TABLE "%(table_name)s" (%(field_list)s);"""
        options = dict(table_name=self.temp_table_name)
        field_list = []

        # Loop through all the CSV headers
        for header in self.headers:

            # Format the SQL create statement
            string = '"%s" text' % header

            # Add the string to the list
            field_list.append(string)

        # Join all the field strings together
        options['field_list'] = ", ".join(field_list)

        # Mash together the SQL and pass it out
        return sql % options

    def create(self, cursor):
        """
        Generate and run create sql for the temp table.
        Runs a DROP on same prior to CREATE to avoid collisions.

        cursor:
          A cursor object on the db
        """
        logger.debug("Running CREATE command")
        self.drop(cursor)
        create_sql = self.prep_create()
        logger.debug(create_sql)
        cursor.execute(create_sql)

    #
    # COPY commands
    #

    def prep_copy(self):
        """
        Creates a COPY statement that loads the CSV into a temporary table.

        Returns SQL that can be run.
        """
        sql = """
            COPY "%(db_table)s" (%(header_list)s)
            FROM STDIN
            WITH CSV HEADER %(extra_options)s;
        """
        options = {
            'db_table': self.temp_table_name,
            'extra_options': '',
            'header_list': ", ".join(['"{}"'.format(h) for h in self.headers])
        }
        if self.quote_character:
            options['extra_options'] += " QUOTE '{}'".format(
                self.quote_character)
        if self.delimiter:
            options['extra_options'] += " DELIMITER '{}'".format(
                self.delimiter)
        if self.null is not None:
            options['extra_options'] += " NULL '{}'".format(self.null)
        if self.force_not_null is not None:
            options['extra_options'] += " FORCE NOT NULL {}".format(','.join(
                '"{}"'.format(s) for s in self.force_not_null))
        if self.force_null is not None:
            options['extra_options'] += " FORCE NULL {}".format(','.join(
                '"%s"' % s for s in self.force_null))
        if self.encoding:
            options['extra_options'] += " ENCODING '{}'".format(self.encoding)
        return sql % options

    def pre_copy(self, cursor):
        pass

    def copy(self, cursor):
        """
        Generate and run the COPY command to copy data from csv to temp table.

        Calls `self.pre_copy(cursor)` and `self.post_copy(cursor)` respectively
        before and after running copy

        cursor:
          A cursor object on the db
        """
        # Run pre-copy hook
        self.pre_copy(cursor)

        logger.debug("Running COPY command")
        copy_sql = self.prep_copy()
        logger.debug(copy_sql)
        cursor.copy_expert(copy_sql, self.csv_file)

        # Run post-copy hook
        self.post_copy(cursor)

    def post_copy(self, cursor):
        pass

    #
    # INSERT commands
    #

    def insert_suffix(self):
        # If on_conflict is not an empty list
        if self.on_conflict:
            # First item on list - Operation
            if self.on_conflict[0] == 'DO NOTHING':
                return """
                    ON CONFLICT DO NOTHING;
                """
            elif self.on_conflict[0] == 'DO UPDATE':
                conflict_sql = self.on_conflict.copy()

                # Second item on list - Constraint
                constraint = conflict_sql[1]
                # Delete first two items on list. Only columns to be updated remain
                del conflict_sql[0:2]

                update_columns = ', '.join(
                    ["{0} = EXCLUDED.{0}".format(col) for col in conflict_sql])

                if self.static_mapping_on_conflict is not None:
                    update_columns += ', '
                    update_columns += ', '.join([
                        "{0} = '{1}'".format(
                            col, self.static_mapping_on_conflict[col])
                        for col in self.static_mapping_on_conflict
                    ])

                return """
                    ON CONFLICT {0} DO UPDATE SET {1};
                    """.format(constraint, update_columns)
        else:
            return ";"

    def prep_insert(self):
        """
        Creates a INSERT statement that reorders and cleans up
        the fields from the temporary table for insertion into the
        Django model.

        Returns SQL that can be run.
        """
        sql = """
            INSERT INTO "%(model_table)s" (%(model_fields)s) (
            SELECT %(temp_fields)s
            FROM "%(temp_table)s")%(insert_suffix)s
        """
        options = dict(model_table=self.model._meta.db_table,
                       temp_table=self.temp_table_name,
                       insert_suffix=self.insert_suffix())

        #
        # The model fields to be inserted into
        #

        model_fields = []
        for field_name, header in self.mapping.items():
            field = self.get_field(field_name)
            model_fields.append('"%s"' % field.get_attname_column()[1])

        for k in self.static_mapping.keys():
            model_fields.append('"%s"' % k)

        options['model_fields'] = ", ".join(model_fields)

        #
        # The temp fields to SELECT from
        #

        temp_fields = []
        for field_name, header in self.mapping.items():
            # Pull the field object from the model
            field = self.get_field(field_name)
            field_type = field.db_type(self.conn)

            # Format the SQL
            string = 'cast("%s" as %s)' % (header, field_type)

            # Apply a datatype template override, if it exists
            if hasattr(field, 'copy_template'):
                string = field.copy_template % dict(name=header)

            # Apply a field specific template override, if it exists
            template_method = 'copy_%s_template' % field.name
            if hasattr(self.model, template_method):
                template = getattr(self.model(), template_method)()
                string = template % dict(name=header)

            # Add field to list
            temp_fields.append(string)

        # Tack on static fields
        for v in self.static_mapping.values():
            temp_fields.append("'%s'" % v)

        # Join it all together
        options['temp_fields'] = ", ".join(temp_fields)

        # Pass it out
        return sql % options

    def pre_insert(self, cursor):
        pass

    def insert(self, cursor):
        """
        Generate and run the INSERT command to move data from the temp table
        to the concrete table.

        Calls `self.pre_insert(cursor)` and `self.post_insert(cursor)` respectively
        before and after running the insert.

        returns: the count of rows inserted

        cursor:
          A cursor object on the db
        """
        # Pre-insert hook
        self.pre_insert(cursor)

        logger.debug("Running INSERT command")
        insert_sql = self.prep_insert()
        logger.debug(insert_sql)
        cursor.execute(insert_sql)
        insert_count = cursor.rowcount
        logger.debug("{} rows inserted".format(insert_count))

        # Post-insert hook
        self.post_insert(cursor)

        # Return the row count
        return insert_count

    def post_insert(self, cursor):
        pass

    #
    # DROP commands
    #

    def prep_drop(self):
        """
        Creates a DROP statement that gets rid of the temporary table.

        Return SQL that can be run.
        """
        return 'DROP TABLE IF EXISTS "%s";' % self.temp_table_name

    def drop(self, cursor):
        """
        Generate and run the DROP command for the temp table.

        cursor:
          A cursor object on the db
        """
        logger.debug("Running DROP command")
        drop_sql = self.prep_drop()
        logger.debug(drop_sql)
        cursor.execute(drop_sql)
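
A hedged usage sketch for CopyMapping; the Django model, app path, CSV file, and column names are hypothetical and only follow the constructor signature above:

# Hypothetical model, illustrative only:
#   class Person(models.Model):
#       name = models.CharField(max_length=200)
#       number = models.IntegerField(null=True)
from myapp.models import Person  # hypothetical app/model

mapping = CopyMapping(
    Person,
    '/tmp/people.csv',                    # CSV with NAME and NUMBER headers
    dict(name='NAME', number='NUMBER'),   # model field -> CSV header
    delimiter=',',
    null='',
)
inserted = mapping.save()                 # create temp table, COPY, INSERT, DROP
print(inserted, 'rows loaded')
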
class Profiles:

    def __init__(self):
        self.data = OrderedDict()
        self.order = list()

    def get_profiles_list(self):
        """
        returns list of profile keys
        :return:
        """
        return self.data.keys()

    def add_profile(self, name: str):
        """
        adds profile with given name and default values
        :param name: name of profile
        :return:
        """
        self.data[name] = {'AfterWake': {"AuxLeds": []},
                           'PowerOn': {'Blade': {'Speed': 144}, "AuxLeds": []},
                           'WorkingMode': {'Color': [0, 0, 255], 'Flaming': 0, 'FlickeringAlways': 0, "AuxLeds": []},
                           'PowerOff': {'Blade': {'Speed': 144, 'MoveForward': 0}, "AuxLeds": []},
                           'Flaming': {'Size': {'Min': 2, 'Max': 9}, 'Speed': {'Min': 12, 'Max': 27},
                                       'Delay_ms': {'Min': 54, 'Max': 180},
                                       'Colors': ['random']},
                           'Flickering': {'Time': {'Min': 90, 'Max': 360}, 'Brightness': {'Min': 50, 'Max': 100},
                                          "AuxLeds": []},
                           'Blaster': {'Color': [255, 0, 0], 'Duration_ms': 720, 'SizePix': 7, "AuxLeds": []},
                           'Clash': {'Color': [255, 0, 0], 'Duration_ms': 720, 'SizePix': 11, "AuxLeds": []},
                           'Stab': {'Color': [255, 0, 0], 'Duration_ms': 720, 'SizePix': 11, "AuxLeds": []},
                           'Lockup': {'Flicker': {'Color': [255, 0, 0], 'Time': {'Min': 45, 'Max': 80},
                                                  'Brightness': {'Min': 50, 'Max': 100}},
                                      'Flashes': {'Period': {'Min': 15, 'Max': 25}, 'Color': [255, 0, 0],
                                                  'Duration_ms': 50,
                                                  'SizePix': 7},
                                      "AuxLeds": []},
                           'Blade2': {
                               'IndicateBlasterClashLockup': 1,
                               'WorkingMode': {'Color': [0, 0, 255]},
                               'Flaming': {'AlwaysOn': 0, 'Size': {'Min': 2, 'Max': 9},
                                           'Speed': {'Min': 12, 'Max': 27},
                                           'Delay_ms': {'Min': 54, 'Max': 180},
                                           'Colors': ['random']},
                               'Flickering': {'AlwaysOn': 0, 'Time': {'Min': 90, 'Max': 360},
                                              'Brightness': {'Min': 50, 'Max': 100}},
                               'DelayBeforeOn': 200}
                           }
        self.order.append(name)

    def delete_profile(self, name: str):
        """
        deletes profile by key
        :param name: name of profile
        :return:
        """
        self.data.pop(name)
        self.order.remove(name)

    @staticmethod
    def get_default(path: List[str]) -> Dict[str, Any]:
        """
        gets default value for key path
        :param path: path of keys
        :return: default value
        """
        data = default_profile
        for key in path:
            data = data[key]
        return data

    def get_value(self, path: List[str], profile: str) -> Dict[str, Any]:
        """
        gets value of the field at the given key path for the given profile
        :param path: path of keys
        :param profile: profile key
        :return: value
        """
        data = self.data[profile]
        for key in path:
            data = data[key]
        return data

    def update_value(self, path: List[str], profile: str, value: object):
        """
        updates the field at the given key path in the given profile, setting it to the given value
        :param path: key path
        :param profile: profile
        :param value: given new value
        :return:
        """
        try:
            data = self.data[profile]
            for key in path[:-1]:
                data = data[key]
            data[path[-1]] = value
        except (KeyError, IndexError):
            print("no such key or keypath is empty")  # to do logging

    def save_color(self, path: Sequence[str], color: Union[Sequence[int], str], profile: str):
        """
        saves color given as a list of rgb components to path in profiledata dict
        :param path: path of keys
        :param color: rgb components
        :param profile: current profile
        :return:
        """
        try:
            data = self.data[profile]
            for key in path[:-1]:
                data = data[key]
            data[path[-1]].append(color)
        except (IndexError, TypeError, ValueError):
            print("wrong keypath or color data")  # to do logging

    def get_colors(self, path: List[str], profile: str) -> Optional[List[str]]:
        """
        gets colors list for profile and path
        :param path: list of keys
        :param profile: current profile
        :return: list of colors
        """
        try:
            data = self.data[profile]
            for key in path:
                data = data[key]
            return data
        except KeyError:
            print("Wrong keys used")  # to do logging
            return None

    def delete_color(self, path: List[int], color: List[int], profile: str):
        """
        deletes color given as a list of rgb components from path in profiledata dict
        :param path: path of keys
        :param color: rgb components
        :param profile: current profile
        :return:
        """
        try:
            data = self.data[profile]
            for key in path[:-1]:
                data = data[key]
            data[path[-1]].remove(color)
        except (IndexError, TypeError, ValueError):
            print("wrong keypath or color data")  # to do logging

    def save_aux(self, aux: str, effect: str, profile: str):
        """
        save aux effect to effect in profile
        :param aux:
        :param effect:
        :param profile:
        :return:
        """
        data = self.data[profile][effect]
        if aux_key not in data.keys():
            data[aux_key] = [aux]
        else:
            data[aux_key].append(aux)

    def delete_aux(self, aux: str, effect: str, profile: str):
        try:
            data = self.data[profile][effect]
            data[aux_key].remove(aux)
            # if not data[aux_key]:
            #    data.pop(aux_key)
        except (IndexError, KeyError):
            print("Incorrects key or aux name")  # to do logging

    def get_aux_effects(self, effect: str, profile: str):
        """
        gets list of aux effects for selected profile and effect
        :param effect: effect to get aux effects for
        :param profile: profile to get aux effects for
        :return:
        """
        data = self.data
        data = data[profile][effect]
        return data.get(aux_key, [])

    def change_key_order(self, old: str, new: str):
        """
        removes old key, adds new key to profile data dict, renames key in order list
        :param old: old key
        :param new: new key
        :return:
        """
        real_key = ""
        for key in self.data.keys():
            if key.lower() == old.lower():
                real_key = old
        if real_key == "":
            return "No % s profile" % old, -1
        self.data[new] = self.data[real_key]
        self.data.pop(old)
        i = self.order.index(old)
        self.order[i] = new
        return "", i

    def order_changed(self, key: str, direction: str):
        """
        changes key order in order list, moves selected key up or down
        :param key: selected profile name
        :param direction: up or down
        :return:
        """
        i = self.order.index(key)
        self.order.remove(key)
        if direction == "Up":
            self.order.insert(i - 1, key)
        if direction == "Down":
            self.order.insert(i + 1, key)
        return i

    def save_to_file(self, data, filename: str):
        """
        saves to filename as pseudo-json (no quotes)
        :param data: data to save
        :param filename: name of file to save
        :return:
        """
        pprint.sorted = lambda x, key=None: x
        new_data = {key: data[key] for key in self.order}
        text = pprint.pformat(new_data)
        text = text.replace(r"'", "")
        text = text[1:-1]
        with open(filename, "w", encoding="utf-8") as f:
            f.write(text)

    @staticmethod
    def get_default_value(key_list: List[str]) -> Dict[str, Any]:
        """
        get value from defaults using key path
        :param key_list: path of keys
        :return: value
        """
        temp_data = default_profile
        for key in key_list:
            temp_data = temp_data[key]
        return temp_data

    @staticmethod
    def check_section(new_data: dict, check_function: Callable, param: str, profile: str) -> str:
        """
        checks section of loaded from text data
        :param new_data: data
        :param check_function: function to check with
        :param param: key of section
        :param profile: name of profile for error
        :return: warning text
        """
        checker = profilechecker.ProfileChecker()
        if check_function.__name__ == "check_flaming":
            e = check_function(new_data, ['size', 'speed', 'delay_ms', 'colors', 'auxleds'])
        elif check_function.__name__ == 'check_flickering':
            e = check_function(new_data, ['time', 'brightness', 'auxleds'])
        elif check_function.__name__ == 'check_movement':
            e = check_function(new_data, param)
        else:
            e = check_function(new_data)
        if e:
            section_key = checker.get_key(new_data, param)
            if section_key:
                new_data.pop(section_key)
            return "ERROR! " + profile + ": " + param + ': ' + e + " profile not loaded;\n"
        return ""

    @staticmethod
    def load_data_from_text(text: str):
        """
        loads data from texts
        :param text: text with data
        :return:
        """
        new_data, error = IniToJson.get_json(text)
        if error or new_data is None:
            return None, error, ""
        if not isinstance(new_data, dict):
            return None, "Wrong profile data format", ""
        new_data = OrderedDict(new_data)
        warning = ""
        wrong_profile_keys = list()
        checker = profilechecker.ProfileChecker()
        for profile in new_data.keys():
            if not isinstance(new_data[profile], dict):
                warning += ("Wrong settings format for profile %s, profile not loaded\n" % profile)
                wrong_profile_keys.append(profile)
                continue
            w, wrong_keys = profilechecker.check_keys(new_data[profile],
                                                      [key.lower() for key in default_profile.keys()])
            if w:
                warning += profile + ': ' + w + '\n'
            for key in wrong_keys:
                new_data[profile].pop(key)
            warning += Profiles.check_section(new_data[profile], checker.check_afterwake, "afterwake", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_poweron, "poweron", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_workingmode, "workingmode", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_poweroff, "poweroff", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_flaming, "flaming", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_flickering, "flickering", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_movement, "blaster", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_movement, "stab", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_movement, "clash", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_lockup, "lockup", profile)
            warning += Profiles.check_section(new_data[profile], checker.check_blade2, "blade2", profile)

            for key in default_profile.keys():
                if key.lower() not in [key.lower() for key in new_data[profile].keys()]:
                    wrong_profile_keys.append(profile)

        for key in set(wrong_profile_keys):
            new_data.pop(key)
        return new_data, "", warning
Exemple #49
0
class Struct(Field):
    """Represents a named list of fields sharing the same domain.
    """
    def __init__(self, *fields):
        """ fields is a list of tuples in format of (name, field). The name is
        a string of nested name, e.g., `a`, `a:b`, `a:b:c`. For example

        Struct(
          ('a', Scalar()),
          ('b:c', Scalar()),
          ('b:d:e', Scalar()),
          ('b', Struct(
            ('f', Scalar()),
          )),
        )

        is equal to

        Struct(
          ('a', Scalar()),
          ('b', Struct(
            ('c', Scalar()),
            ('d', Struct(('e', Scalar()))),
            ('f', Scalar()),
          )),
        )
        """
        for field in fields:
            assert len(field) == 2
            assert field[0], 'Field names cannot be empty'
            assert field[0] != 'lengths', (
                'Struct cannot contain a field named `lengths`.')
        fields = [(name, _normalize_field(field)) for name, field in fields]
        self.fields = OrderedDict()
        for name, field in fields:
            if FIELD_SEPARATOR in name:
                name, field = self._struct_from_nested_name(name, field)
            if name not in self.fields:
                self.fields[name] = field
                continue
            if (not isinstance(field, Struct)
                    or not isinstance(self.fields[name], Struct)):
                raise ValueError('Duplicate field name: %s' % name)
            self.fields[name] = self.fields[name] + field
        for id, (_, field) in enumerate(self.fields.items()):
            field._set_parent(self, id)
        Field.__init__(self, self.fields.values())

    def _struct_from_nested_name(self, nested_name, field):
        def create_internal(nested_name, field):
            names = nested_name.split(FIELD_SEPARATOR, 1)
            if len(names) == 1:
                added_field = field
            else:
                added_field = create_internal(names[1], field)
            return Struct((names[0], added_field))

        names = nested_name.split(FIELD_SEPARATOR, 1)
        assert len(names) >= 2
        return names[0], create_internal(names[1], field)

    def get_children(self):
        return self.fields.items()

    def field_names(self):
        names = []
        for name, field in self.fields.items():
            names += [_join_field_name(name, f) for f in field.field_names()]
        return names

    def field_types(self):
        types = []
        for _, field in self.fields.items():
            types += field.field_types()
        return types

    def field_metadata(self):
        metadata = []
        for _, field in self.fields.items():
            metadata += field.field_metadata()
        return metadata

    def field_blobs(self):
        blobs = []
        for _, field in self.fields.items():
            blobs += field.field_blobs()
        return blobs

    def all_scalars(self):
        scalars = []
        for _, field in self.fields.items():
            scalars += field.all_scalars()
        return scalars

    def has_blobs(self):
        return all(field.has_blobs() for field in self.fields.values())

    def clone(self, keep_blobs=True):
        normalized_fields = [(k, _normalize_field(v, keep_blobs=keep_blobs))
                             for k, v in self.fields.items()]
        return Struct(*normalized_fields)

    def _get_field_by_nested_name(self, nested_name):
        names = nested_name.split(FIELD_SEPARATOR, 1)
        field = self.fields.get(names[0], None)

        if field is None:
            return None

        if len(names) == 1:
            return field

        try:
            return field[names[1]]
        except (KeyError, TypeError):
            return None

    def __repr__(self):
        return "Struct({})".format(', '.join([
            "{}={!r}".format(name, field)
            for name, field in self.fields.items()
        ]))

    def __contains__(self, item):
        field = self._get_field_by_nested_name(item)
        return field is not None

    def __len__(self):
        return len(self.fields)

    def __getitem__(self, item):
        """
        item can be a tuple or list of ints or strings, or a single
        int or string. String item is a nested field name, e.g., "a", "a:b",
        "a:b:c". Int item is the index of a field at the first level of the
        Struct.
        """
        if isinstance(item, list) or isinstance(item, tuple):
            keys = list(self.fields.keys())
            return Struct(
                *[(keys[k] if isinstance(k, int) else k, self[k])
                  for k in item])
        elif isinstance(item, int):
            return list(self.fields.values())[item]
        else:
            field = self._get_field_by_nested_name(item)
            if field is None:
                raise KeyError('field "%s" not found' % (item))
            return field

    def __getattr__(self, item):
        if item.startswith('__'):
            raise AttributeError(item)
        try:
            return self.__dict__['fields'][item]
        except KeyError:
            raise AttributeError(item)

    def __add__(self, other):
        """
        Allows to merge fields of two schema.Struct using '+' operator.
        If two Struct have common field names, the merge is conducted
        recursively. Here are examples:

        Example 1
        s1 = Struct(('a', Scalar()))
        s2 = Struct(('b', Scalar()))
        s1 + s2 == Struct(
            ('a', Scalar()),
            ('b', Scalar()),
        )

        Example 2
        s1 = Struct(
            ('a', Scalar()),
            ('b', Struct(('c', Scalar()))),
        )
        s2 = Struct(('b', Struct(('d', Scalar()))))
        s1 + s2 == Struct(
            ('a', Scalar()),
            ('b', Struct(
                ('c', Scalar()),
                ('d', Scalar()),
            )),
        )
        """
        if not isinstance(other, Struct):
            return NotImplemented

        children = OrderedDict(self.get_children())
        for name, right_field in other.get_children():
            if name not in children:
                children[name] = right_field
                continue
            left_field = children[name]
            children[name] = left_field + right_field

        return Struct(*(children.items()))
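A short usage sketch of the nested-name flattening and the '+' merge described above. The import assumes this Struct/Scalar pair comes from caffe2.python.schema; adjust if the module layout differs:

from caffe2.python.schema import Scalar, Struct

s1 = Struct(('a', Scalar()), ('b:c', Scalar()))  # 'b:c' expands into Struct 'b' with child 'c'
s2 = Struct(('b', Struct(('d', Scalar()))))
merged = s1 + s2                                 # the common field 'b' is merged recursively
print(merged.field_names())                      # ['a', 'b:c', 'b:d']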
Exemple #50
0
options['Class B Lap Splice'] = set_to_classb_lapsplice
options['Tension Development Length'] = set_to_tension_dev_length
options['Prefix: Plus/Minus'] = add_plusminus_prefix
options['Suffix: Plus/Minus'] = add_plusminus_suffix
options['Suffix: R.O. Below: VIF MFR'] = set_to_vfrmfr
options['Suffix: VIF'] = set_to_vif_suffix
options['Below: VIF'] = set_to_vif_below
options['Suffix: (?)'] = set_to_question
options['Suffix: P.D.'] = set_to_pd
options['Below: P.D.'] = set_to_pd_beow
options['Suffix: R.O.'] = set_to_ro
options['Below: R.O.'] = set_to_ro_below
options['Below: TYP'] = set_to_typ_below
options['Suffix: TYP'] = set_to_typ_suffix
options['Below: MIN'] = set_to_min_below
options['Suffix: MIN'] = set_to_min_suffix
options['Below: MAX'] = set_to_max_below
options['Suffix: MAX'] = set_to_max_suffix
options['Below: CLR'] = set_to_clr_below
options['Suffix: CLR'] = set_to_clr_suffix
options['Below: UNO'] = set_to_uno_below
options['Suffix: UNO'] = set_to_uno_suffix


selected_switch = \
    forms.CommandSwitchWindow.show(options.keys(),
                                   message='Pick override option:')

if selected_switch:
    options[selected_switch]()
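The snippet above is an ordered dispatch table: the OrderedDict keeps the menu entries in registration order for the switch window, and the chosen label looks up and calls the matching handler. A generic sketch of the same pattern outside pyRevit (all names illustrative):

from collections import OrderedDict

options = OrderedDict()
options['Suffix: TYP'] = lambda: print("appending TYP")
options['Suffix: MIN'] = lambda: print("appending MIN")

selected_switch = 'Suffix: TYP'  # stands in for forms.CommandSwitchWindow.show(...)
if selected_switch:
    options[selected_switch]()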
Exemple #51
0
class Network:
    """Generic network abstraction.

    Acts as a convenience wrapper for a parameterized network construction
    function, providing several utility methods and convenient access to
    the inputs/outputs/weights.

    Network objects can be safely pickled and unpickled for long-term
    archival purposes. The pickling works reliably as long as the underlying
    network construction function is defined in a standalone Python module
    that has no side effects or application-specific imports.

    Args:
        name: Network name. Used to select TensorFlow name and variable scopes.
        func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
        static_kwargs: Keyword arguments to be passed in to the network construction function.

    Attributes:
        name: User-specified name, defaults to build func name if None.
        scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.
        static_kwargs: Arguments passed to the user-supplied build func.
        components: Container for sub-networks. Passed to the build func, and retained between calls.
        num_inputs: Number of input tensors.
        num_outputs: Number of output tensors.
        input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
        output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
        input_shape: Short-hand for input_shapes[0].
        output_shape: Short-hand for output_shapes[0].
        input_templates: Input placeholders in the template graph.
        output_templates: Output tensors in the template graph.
        input_names: Name string for each input.
        output_names: Name string for each output.
        own_vars: Variables defined by this network (local_name => var), excluding sub-networks.
        vars: All variables (local_name => var).
        trainables: All trainable variables (local_name => var).
        var_global_to_local: Mapping from variable global names to local names.
    """
    def __init__(self,
                 name: str = None,
                 func_name: Any = None,
                 **static_kwargs):
        tfutil.assert_tf_initialized()
        assert isinstance(name, str) or name is None
        assert func_name is not None
        assert isinstance(func_name,
                          str) or util.is_top_level_function(func_name)
        assert util.is_pickleable(static_kwargs)

        self._init_fields()
        self.name = name
        self.static_kwargs = util.EasyDict(static_kwargs)

        # Locate the user-specified network build function.
        if util.is_top_level_function(func_name):
            func_name = util.get_top_level_function_name(func_name)
        module, self._build_func_name = util.get_module_from_obj_name(
            func_name)
        self._build_func = util.get_obj_from_module(module,
                                                    self._build_func_name)
        assert callable(self._build_func)

        # Dig up source code for the module containing the build function.
        self._build_module_src = _import_module_src.get(module, None)
        if self._build_module_src is None:
            self._build_module_src = inspect.getsource(module)

        # Init TensorFlow graph.
        self._init_graph()
        self.reset_own_vars()

    def _init_fields(self) -> None:
        self.name = None
        self.scope = None
        self.static_kwargs = util.EasyDict()
        self.components = util.EasyDict()
        self.num_inputs = 0
        self.num_outputs = 0
        self.input_shapes = [[]]
        self.output_shapes = [[]]
        self.input_shape = []
        self.output_shape = []
        self.input_templates = []
        self.output_templates = []
        self.input_names = []
        self.output_names = []
        self.own_vars = OrderedDict()
        self.vars = OrderedDict()
        self.trainables = OrderedDict()
        self.var_global_to_local = OrderedDict()

        self._build_func = None  # User-supplied build function that constructs the network.
        self._build_func_name = None  # Name of the build function.
        self._build_module_src = None  # Full source code of the module containing the build function.
        self._run_cache = dict()  # Cached graph data for Network.run().

    def _init_graph(self) -> None:
        # Collect inputs.
        self.input_names = []

        for param in inspect.signature(self._build_func).parameters.values():
            if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
                self.input_names.append(param.name)

        self.num_inputs = len(self.input_names)
        assert self.num_inputs >= 1

        # Choose name and scope.
        if self.name is None:
            self.name = self._build_func_name
        assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
        with tf.name_scope(None):
            self.scope = tf.get_default_graph().unique_name(self.name,
                                                            mark_as_used=True)

        # Finalize build func kwargs.
        build_kwargs = dict(self.static_kwargs)
        build_kwargs["is_template_graph"] = True
        build_kwargs["components"] = self.components

        # Build template graph.
        with tfutil.absolute_variable_scope(
                self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(
                    self.scope):  # ignore surrounding scopes
            assert tf.get_variable_scope().name == self.scope
            assert tf.get_default_graph().get_name_scope() == self.scope
            with tf.control_dependencies(
                    None):  # ignore surrounding control dependencies
                self.input_templates = [
                    tf.placeholder(tf.float32, name=name)
                    for name in self.input_names
                ]
                out_expr = self._build_func(*self.input_templates,
                                            **build_kwargs)

        # Collect outputs.
        assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        self.output_templates = [
            out_expr
        ] if tfutil.is_tf_expression(out_expr) else list(out_expr)
        self.num_outputs = len(self.output_templates)
        assert self.num_outputs >= 1
        assert all(tfutil.is_tf_expression(t) for t in self.output_templates)

        # Perform sanity checks.
        if any(t.shape.ndims is None for t in self.input_templates):
            raise ValueError(
                "Network input shapes not defined. Please call x.set_shape() for each input."
            )
        if any(t.shape.ndims is None for t in self.output_templates):
            raise ValueError(
                "Network output shapes not defined. Please call x.set_shape() where applicable."
            )
        if any(not isinstance(comp, Network)
               for comp in self.components.values()):
            raise ValueError(
                "Components of a Network must be Networks themselves.")
        if len(self.components) != len(
                set(comp.name for comp in self.components.values())):
            raise ValueError("Components of a Network must have unique names.")

        # List inputs and outputs.
        self.input_shapes = [
            tfutil.shape_to_list(t.shape) for t in self.input_templates
        ]
        self.output_shapes = [
            tfutil.shape_to_list(t.shape) for t in self.output_templates
        ]
        self.input_shape = self.input_shapes[0]
        self.output_shape = self.output_shapes[0]
        self.output_names = [
            t.name.split("/")[-1].split(":")[0] for t in self.output_templates
        ]

        # List variables.
        self.own_vars = OrderedDict(
            (var.name[len(self.scope) + 1:].split(":")[0], var)
            for var in tf.global_variables(self.scope + "/"))
        self.vars = OrderedDict(self.own_vars)
        self.vars.update((comp.name + "/" + name, var)
                         for comp in self.components.values()
                         for name, var in comp.vars.items())
        self.trainables = OrderedDict(
            (name, var) for name, var in self.vars.items() if var.trainable)
        self.var_global_to_local = OrderedDict(
            (var.name.split(":")[0], name) for name, var in self.vars.items())

    def reset_own_vars(self) -> None:
        """Re-initialize all variables of this network, excluding sub-networks."""
        tfutil.run([var.initializer for var in self.own_vars.values()])

    def reset_vars(self) -> None:
        """Re-initialize all variables of this network, including sub-networks."""
        tfutil.run([var.initializer for var in self.vars.values()])

    def reset_trainables(self) -> None:
        """Re-initialize all trainable variables of this network, including sub-networks."""
        tfutil.run([var.initializer for var in self.trainables.values()])

    def get_output_for(
            self,
            *in_expr: TfExpression,
            return_as_list: bool = False,
            **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
        """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s)."""
        assert len(in_expr) == self.num_inputs
        assert not all(expr is None for expr in in_expr)

        # Finalize build func kwargs.
        build_kwargs = dict(self.static_kwargs)
        build_kwargs.update(dynamic_kwargs)
        build_kwargs["is_template_graph"] = False
        build_kwargs["components"] = self.components

        # Build TensorFlow graph to evaluate the network.
        with tfutil.absolute_variable_scope(
                self.scope, reuse=True), tf.name_scope(self.name):
            assert tf.get_variable_scope().name == self.scope
            valid_inputs = [expr for expr in in_expr if expr is not None]
            final_inputs = []
            for expr, name, shape in zip(in_expr, self.input_names,
                                         self.input_shapes):
                if expr is not None:
                    expr = tf.identity(expr, name=name)
                else:
                    expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:],
                                    name=name)
                final_inputs.append(expr)
            out_expr = self._build_func(*final_inputs, **build_kwargs)

        # Propagate input shapes back to the user-specified expressions.
        for expr, final in zip(in_expr, final_inputs):
            if isinstance(expr, tf.Tensor):
                expr.set_shape(final.shape)

        # Express outputs in the desired format.
        assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
        if return_as_list:
            out_expr = [
                out_expr
            ] if tfutil.is_tf_expression(out_expr) else list(out_expr)
        return out_expr

    def get_var_local_name(
            self, var_or_global_name: Union[TfExpression, str]) -> str:
        """Get the local name of a given variable, without any surrounding name scopes."""
        assert tfutil.is_tf_expression(var_or_global_name) or isinstance(
            var_or_global_name, str)
        global_name = var_or_global_name if isinstance(
            var_or_global_name, str) else var_or_global_name.name
        return self.var_global_to_local[global_name]

    def find_var(self, var_or_local_name: Union[TfExpression,
                                                str]) -> TfExpression:
        """Find variable by local or global name."""
        assert tfutil.is_tf_expression(var_or_local_name) or isinstance(
            var_or_local_name, str)
        return self.vars[var_or_local_name] if isinstance(
            var_or_local_name, str) else var_or_local_name

    def get_var(self, var_or_local_name: Union[TfExpression,
                                               str]) -> np.ndarray:
        """Get the value of a given variable as NumPy array.
        Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
        return self.find_var(var_or_local_name).eval()

    def set_var(self, var_or_local_name: Union[TfExpression, str],
                new_value: Union[int, float, np.ndarray]) -> None:
        """Set the value of a given variable based on the given NumPy array.
        Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
        tfutil.set_vars({self.find_var(var_or_local_name): new_value})

    def __getstate__(self) -> dict:
        """Pickle export."""
        state = dict()
        state["version"] = 3
        state["name"] = self.name
        state["static_kwargs"] = dict(self.static_kwargs)
        state["components"] = dict(self.components)
        state["build_module_src"] = self._build_module_src
        state["build_func_name"] = self._build_func_name
        state["variables"] = list(
            zip(self.own_vars.keys(),
                tfutil.run(list(self.own_vars.values()))))
        return state

    def __setstate__(self, state: dict) -> None:
        """Pickle import."""
        # pylint: disable=attribute-defined-outside-init
        tfutil.assert_tf_initialized()
        self._init_fields()

        # Execute custom import handlers.
        for handler in _import_handlers:
            state = handler(state)

        # Set basic fields.
        assert state["version"] in [2, 3]
        self.name = state["name"]
        self.static_kwargs = util.EasyDict(state["static_kwargs"])
        self.components = util.EasyDict(state.get("components", {}))
        self._build_module_src = state["build_module_src"]
        self._build_func_name = state["build_func_name"]

        # Create temporary module from the imported source code.
        module_name = "_tflib_network_import_" + uuid.uuid4().hex
        module = types.ModuleType(module_name)
        sys.modules[module_name] = module
        _import_module_src[module] = self._build_module_src
        exec(self._build_module_src, module.__dict__)  # pylint: disable=exec-used

        # Locate network build function in the temporary module.
        self._build_func = util.get_obj_from_module(module,
                                                    self._build_func_name)
        assert callable(self._build_func)

        # Init TensorFlow graph.
        self._init_graph()
        self.reset_own_vars()
        tfutil.set_vars(
            {self.find_var(name): value
             for name, value in state["variables"]})

    def clone(self, name: str = None, **new_static_kwargs) -> "Network":
        """Create a clone of this network with its own copy of the variables."""
        # pylint: disable=protected-access
        net = object.__new__(Network)
        net._init_fields()
        net.name = name if name is not None else self.name
        net.static_kwargs = util.EasyDict(self.static_kwargs)
        net.static_kwargs.update(new_static_kwargs)
        net._build_module_src = self._build_module_src
        net._build_func_name = self._build_func_name
        net._build_func = self._build_func
        net._init_graph()
        net.copy_vars_from(self)
        return net

    def copy_own_vars_from(self, src_net: "Network") -> None:
        """Copy the values of all variables from the given network, excluding sub-networks."""
        names = [
            name for name in self.own_vars.keys() if name in src_net.own_vars
        ]
        tfutil.set_vars(
            tfutil.run({self.vars[name]: src_net.vars[name]
                        for name in names}))

    def copy_vars_from(self, src_net: "Network") -> None:
        """Copy the values of all variables from the given network, including sub-networks."""
        names = [name for name in self.vars.keys() if name in src_net.vars]
        tfutil.set_vars(
            tfutil.run({self.vars[name]: src_net.vars[name]
                        for name in names}))

    def copy_trainables_from(self, src_net: "Network") -> None:
        """Copy the values of all trainable variables from the given network, including sub-networks."""
        names = [
            name for name in self.trainables.keys()
            if name in src_net.trainables
        ]
        tfutil.set_vars(
            tfutil.run({self.vars[name]: src_net.vars[name]
                        for name in names}))

    def convert(self,
                new_func_name: str,
                new_name: str = None,
                **new_static_kwargs) -> "Network":
        """Create new network with the given parameters, and copy all variables from this network."""
        if new_name is None:
            new_name = self.name
        static_kwargs = dict(self.static_kwargs)
        static_kwargs.update(new_static_kwargs)
        net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
        net.copy_vars_from(self)
        return net

    def setup_as_moving_average_of(
            self,
            src_net: "Network",
            beta: TfExpressionEx = 0.99,
            beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
        """Construct a TensorFlow op that updates the variables of this network
        to be slightly closer to those of the given network."""
        with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
            ops = []
            for name, var in self.vars.items():
                if name in src_net.vars:
                    cur_beta = beta if name in self.trainables else beta_nontrainable
                    new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
                    ops.append(var.assign(new_value))
            return tf.group(*ops)

    def run(
        self,
        *in_arrays: Tuple[Union[np.ndarray, None], ...],
        input_transform: dict = None,
        output_transform: dict = None,
        return_as_list: bool = False,
        print_progress: bool = False,
        minibatch_size: int = None,
        num_gpus: int = 1,
        assume_frozen: bool = False,
        **dynamic_kwargs
    ) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
        """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).

        Args:
            input_transform:    A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
                                The dict must contain a 'func' field that points to a top-level function. The function is called with the input
                                TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
            output_transform:   A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
                                The dict must contain a 'func' field that points to a top-level function. The function is called with the output
                                TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
            return_as_list:     True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
            print_progress:     Print progress to the console? Useful for very large input arrays.
            minibatch_size:     Maximum minibatch size to use, None = disable batching.
            num_gpus:           Number of GPUs to use.
            assume_frozen:      Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
            dynamic_kwargs:     Additional keyword arguments to be passed into the network build function.
        """
        assert len(in_arrays) == self.num_inputs
        assert not all(arr is None for arr in in_arrays)
        assert input_transform is None or util.is_top_level_function(
            input_transform["func"])
        assert output_transform is None or util.is_top_level_function(
            output_transform["func"])
        output_transform, dynamic_kwargs = _handle_legacy_output_transforms(
            output_transform, dynamic_kwargs)
        num_items = in_arrays[0].shape[0]
        if minibatch_size is None:
            minibatch_size = num_items

        # Construct unique hash key from all arguments that affect the TensorFlow graph.
        key = dict(input_transform=input_transform,
                   output_transform=output_transform,
                   num_gpus=num_gpus,
                   assume_frozen=assume_frozen,
                   dynamic_kwargs=dynamic_kwargs)

        def unwind_key(obj):
            if isinstance(obj, dict):
                return [(key, unwind_key(value))
                        for key, value in sorted(obj.items())]
            if callable(obj):
                return util.get_top_level_function_name(obj)
            return obj

        key = repr(unwind_key(key))

        # Build graph.
        if key not in self._run_cache:
            with tfutil.absolute_name_scope(
                    self.scope + "/_Run"), tf.control_dependencies(None):
                with tf.device("/cpu:0"):
                    in_expr = [
                        tf.placeholder(tf.float32, name=name)
                        for name in self.input_names
                    ]
                    in_split = list(
                        zip(*[tf.split(x, num_gpus) for x in in_expr]))

                out_split = []
                for gpu in range(num_gpus):
                    with tf.device("/gpu:%d" % gpu):
                        net_gpu = self.clone() if assume_frozen else self
                        in_gpu = in_split[gpu]

                        if input_transform is not None:
                            in_kwargs = dict(input_transform)
                            in_gpu = in_kwargs.pop("func")(*in_gpu,
                                                           **in_kwargs)
                            in_gpu = [in_gpu] if tfutil.is_tf_expression(
                                in_gpu) else list(in_gpu)

                        assert len(in_gpu) == self.num_inputs
                        out_gpu = net_gpu.get_output_for(*in_gpu,
                                                         return_as_list=True,
                                                         **dynamic_kwargs)

                        if output_transform is not None:
                            out_kwargs = dict(output_transform)
                            out_gpu = out_kwargs.pop("func")(*out_gpu,
                                                             **out_kwargs)
                            out_gpu = [out_gpu] if tfutil.is_tf_expression(
                                out_gpu) else list(out_gpu)

                        assert len(out_gpu) == self.num_outputs
                        out_split.append(out_gpu)

                with tf.device("/cpu:0"):
                    out_expr = [
                        tf.concat(outputs, axis=0)
                        for outputs in zip(*out_split)
                    ]
                    self._run_cache[key] = in_expr, out_expr

        # Run minibatches.
        in_expr, out_expr = self._run_cache[key]
        out_arrays = [
            np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:],
                     expr.dtype.name) for expr in out_expr
        ]

        for mb_begin in range(0, num_items, minibatch_size):
            if print_progress:
                print("\r%d / %d" % (mb_begin, num_items), end="")

            mb_end = min(mb_begin + minibatch_size, num_items)
            mb_num = mb_end - mb_begin
            mb_in = [
                src[mb_begin:mb_end]
                if src is not None else np.zeros([mb_num] + shape[1:])
                for src, shape in zip(in_arrays, self.input_shapes)
            ]
            mb_out = tf.get_default_session().run(out_expr,
                                                  dict(zip(in_expr, mb_in)))

            for dst, src in zip(out_arrays, mb_out):
                dst[mb_begin:mb_end] = src

        # Done.
        if print_progress:
            print("\r%d / %d" % (num_items, num_items))

        if not return_as_list:
            out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(
                out_arrays)
        return out_arrays

    def list_ops(self) -> List[TfExpression]:
        include_prefix = self.scope + "/"
        exclude_prefix = include_prefix + "_"
        ops = tf.get_default_graph().get_operations()
        ops = [op for op in ops if op.name.startswith(include_prefix)]
        ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
        return ops

    def list_layers(
            self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
        """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
        individual layers of the network. Mainly intended to be used for reporting."""
        layers = []

        def recurse(scope, parent_ops, parent_vars, level):
            # Ignore specific patterns.
            if any(
                    p in scope for p in
                ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
                return

            # Filter ops and vars by scope.
            global_prefix = scope + "/"
            local_prefix = global_prefix[len(self.scope) + 1:]
            cur_ops = [
                op for op in parent_ops if op.name.startswith(global_prefix)
                or op.name == global_prefix[:-1]
            ]
            cur_vars = [
                (name, var) for name, var in parent_vars
                if name.startswith(local_prefix) or name == local_prefix[:-1]
            ]
            if not cur_ops and not cur_vars:
                return

            # Filter out all ops related to variables.
            for var in [
                    op for op in cur_ops if op.type.startswith("Variable")
            ]:
                var_prefix = var.name + "/"
                cur_ops = [
                    op for op in cur_ops if not op.name.startswith(var_prefix)
                ]

            # Scope does not contain ops as immediate children => recurse deeper.
            contains_direct_ops = any("/" not in op.name[len(global_prefix):]
                                      and op.type != "Identity"
                                      for op in cur_ops)
            if (level == 0 or not contains_direct_ops) and (len(cur_ops) +
                                                            len(cur_vars)) > 1:
                visited = set()
                for rel_name in [
                        op.name[len(global_prefix):] for op in cur_ops
                ] + [name[len(local_prefix):] for name, _var in cur_vars]:
                    token = rel_name.split("/")[0]
                    if token not in visited:
                        recurse(global_prefix + token, cur_ops, cur_vars,
                                level + 1)
                        visited.add(token)
                return

            # Report layer.
            layer_name = scope[len(self.scope) + 1:]
            layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][
                1]
            layer_trainables = [
                var for _name, var in cur_vars if var.trainable
            ]
            layers.append((layer_name, layer_output, layer_trainables))

        recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
        return layers

    def print_layers(self,
                     title: str = None,
                     hide_layers_with_no_params: bool = False) -> None:
        """Print a summary table of the network structure."""
        rows = [[
            title if title is not None else self.name, "Params", "OutputShape",
            "WeightShape"
        ]]
        rows += [["---"] * 4]
        total_params = 0

        for layer_name, layer_output, layer_trainables in self.list_layers():
            num_params = sum(
                np.prod(tfutil.shape_to_list(var.shape))
                for var in layer_trainables)
            weights = [
                var for var in layer_trainables
                if var.name.endswith("/weight:0")
            ]
            weights.sort(key=lambda x: len(x.name))
            if len(weights) == 0 and len(layer_trainables) == 1:
                weights = layer_trainables
            total_params += num_params

            if not hide_layers_with_no_params or num_params != 0:
                num_params_str = str(num_params) if num_params > 0 else "-"
                output_shape_str = str(layer_output.shape)
                weight_shape_str = str(
                    weights[0].shape) if len(weights) >= 1 else "-"
                rows += [[
                    layer_name, num_params_str, output_shape_str,
                    weight_shape_str
                ]]

        rows += [["---"] * 4]
        rows += [["Total", str(total_params), "", ""]]

        widths = [max(len(cell) for cell in column) for column in zip(*rows)]
        print()
        for row in rows:
            print("  ".join(cell + " " * (width - len(cell))
                            for cell, width in zip(row, widths)))
        print()

    def setup_weight_histograms(self, title: str = None) -> None:
        """Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
        if title is None:
            title = self.name

        with tf.name_scope(None), tf.device(None), tf.control_dependencies(
                None):
            for local_name, var in self.trainables.items():
                if "/" in local_name:
                    p = local_name.split("/")
                    name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
                else:
                    name = title + "_toplevel/" + local_name

                tf.summary.histogram(name, var)
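A hypothetical usage sketch for the class above, assuming the surrounding dnnlib/tflib helpers are available and that my_module.my_net is a standalone build function taking one input tensor (every name below is illustrative, not part of the class):

import numpy as np
import dnnlib.tflib as tflib  # assumed module layout

tflib.init_tf()
net = Network(name="toy", func_name="my_module.my_net", fmaps=64)
net.print_layers()
out = net.run(np.random.randn(8, 512).astype(np.float32), minibatch_size=4)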
Exemple #52
0
weather_ordered = OrderedDict()
weather_ordered["few clouds"] = percentage["few clouds"]
weather_ordered["scattered clouds"] = percentage["scattered clouds"]
weather_ordered["broken clouds"] = percentage["broken clouds"]
weather_ordered["light intensity drizzle"] = percentage[
    "light intensity drizzle"]
weather_ordered["light intensity drizzle rain"] = percentage[
    "light intensity drizzle rain"]
weather_ordered["light rain"] = percentage["light rain"]
weather_ordered["moderate rain"] = percentage["moderate rain"]
weather_ordered["mist"] = percentage["mist"]

print(weather_ordered)

N = len(weather_ordered.keys())

fig, ax = plt.subplots(figsize=(5, 5))

ind = np.arange(N)  # the x locations for the groups
width = 0.3  # the width of the bars

p1 = ax.bar(ind, [i for i in weather_ordered.values()], width, color="#a569bd")
ax.set_ylim(0, 100)
ax.yaxis.set_major_locator(MultipleLocator(10))

ax.set_title('Object detections by detailed weather status')
ax.set_xlabel("detailed weather status")
ax.set_ylabel("object detections in %")
#ax.set_xticks(ind + width / 2)
ax.set_xticklabels([""] + list(weather_ordered.keys()), rotation=90)

Exemple #53
0
def _valid_light_switch(value):
    return _valid_device(value, "light_switch")


DEVICE_SCHEMA = vol.Schema({
    vol.Required(ATTR_NAME): cv.string,
    vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean,
})

DEVICE_SCHEMA_SENSOR = vol.Schema({
    vol.Optional(ATTR_NAME, default=None): cv.string,
    vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean,
    vol.Optional(ATTR_DATA_TYPE, default=[]):
        vol.All(cv.ensure_list, [vol.In(DATA_TYPES.keys())]),
})

DEVICE_SCHEMA_BINARYSENSOR = vol.Schema({
    vol.Optional(ATTR_NAME, default=None): cv.string,
    vol.Optional(CONF_DEVICE_CLASS, default=None): cv.string,
    vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean,
    vol.Optional(ATTR_OFF_DELAY, default=None):
        vol.Any(cv.time_period, cv.positive_timedelta),
    vol.Optional(ATTR_DATA_BITS, default=None): cv.positive_int,
    vol.Optional(CONF_COMMAND_ON, default=None): cv.byte,
    vol.Optional(CONF_COMMAND_OFF, default=None): cv.byte
})

DEFAULT_SCHEMA = vol.Schema({
    vol.Required("platform"): DOMAIN,
Exemple #54
0
class ScriptGenerator:
    """ Generates build scripts on the fly by providing a default header
        tailored to the current build context and performing substitution
        on exported macros from this instance """

    macros = None
    context = None
    spec = None
    exports = None
    unexports = None
    work_dir = None

    def __init__(self, context, spec, work_dir):
        self.work_dir = work_dir
        self.macros = OrderedDict()
        self.context = context
        self.spec = spec
        self.init_default_macros()
        self.load_system_macros()
        self.init_default_exports()

    def define_macro(self, key, value):
        """ Define a named macro. This will take the form %name% """
        self.macros["%{}%".format(key)] = value

    def define_action_macro(self, key, value):
        """ Define an action macro. These take the form %action """
        self.macros["%{}".format(key)] = value

    def define_export(self, key, value):
        """ Define a shell export for scripts """
        self.exports[key] = value

    def define_unexport(self, key):
        """ Ensure key is unexported from shell script """
        self.unexports[key] = (None, )

    def load_system_macros(self):
        path = os.path.join(os.path.dirname(__file__), "rc.yml")

        try:
            f = open(path, "r")
            yamlData = yaml_load(f, Loader=Loader)
            f.close()
        except Exception as e:
            console_ui.emit_error("SCRIPTS", "Cannot load system macros")
            print(e)
            return

        for section in ["defines", "actions"]:
            if section not in yamlData:
                continue
            v = yamlData[section]

            if not isinstance(v, list):
                console_ui.emit_error("rc.yml",
                                      "Expected list of defines in rc config")
                return
            for item in v:
                if not isinstance(item, dict):
                    console_ui.emit_error(
                        "rc.yml", "Expected key:value mapping in list")
                    return
                keys = item.keys()
                if len(keys) > 1:
                    console_ui.emit_error("rc.yml",
                                          "Expected one key in key:value")
                    return
                key = keys[0]
                value = item[key]
                if value.endswith("\n"):
                    value = value[:-1]
                value = value.strip()
                if section == "defines":
                    self.define_macro(key, unicode(value))
                else:
                    self.define_action_macro(key, unicode(value))

    def init_default_macros(self):

        if self.context.emul32:
            if self.context.avx2:
                self.define_macro("libdir", "/usr/lib32/avx2")
            else:
                self.define_macro("libdir", "/usr/lib32")
            self.define_macro("LIBSUFFIX", "32")
            self.define_macro("PREFIX", "/usr")
        else:
            # 64-bit AVX2 build in subdirectory
            if self.context.avx2:
                self.define_macro("libdir", "/usr/lib64/avx2")
            else:
                self.define_macro("libdir", "/usr/lib64")
            self.define_macro("LIBSUFFIX", "64")
            self.define_macro("PREFIX", "/usr")

        self.define_macro("installroot", self.context.get_install_dir())
        self.define_macro("workdir", self.work_dir)
        self.define_macro("JOBS", "-j{}".format(self.context.build.jobcount))
        self.define_macro("YJOBS", "{}".format(self.context.build.jobcount))

        # Consider moving this somewhere else
        self.define_macro("CFLAGS", " ".join(self.context.build.cflags))
        self.define_macro("CXXFLAGS", " ".join(self.context.build.cxxflags))
        self.define_macro("LDFLAGS", " ".join(self.context.build.ldflags))

        self.define_macro("HOST", self.context.build.host)
        self.define_macro("ARCH", self.context.build.arch)
        self.define_macro("PKGNAME", self.spec.pkg_name)
        self.define_macro("PKGFILES", self.context.files_dir)

        self.define_macro("package", self.context.spec.pkg_name)
        self.define_macro("release", self.context.spec.pkg_release)
        self.define_macro("version", self.context.spec.pkg_version)
        self.define_macro("sources", self.context.get_sources_directory())

    def init_default_exports(self):
        """ Initialise our exports """
        self.exports = OrderedDict()
        self.unexports = OrderedDict()

        self.define_export("CFLAGS", " ".join(self.context.build.cflags))
        self.define_export("CXXFLAGS", " ".join(self.context.build.cxxflags))
        self.define_export("LDFLAGS", " ".join(self.context.build.ldflags))
        self.define_export("FFLAGS", " ".join(self.context.build.cflags))
        self.define_export("FCFLAGS", " ".join(self.context.build.cflags))
        self.define_export("PATH", self.context.get_path())
        self.define_export("workdir", "%workdir%")
        self.define_export("package", "%package%")
        self.define_export("release", "%release%")
        self.define_export("version", "%version%")
        self.define_export("sources", "%sources%")
        self.define_export("pkgfiles", "%PKGFILES%")
        self.define_export("installdir", "%installroot%")
        self.define_export("CC", self.context.build.cc)
        self.define_export("CXX", self.context.build.cxx)
        if self.context.build.ld_as_needed:
            self.define_export("LD_AS_NEEDED", "1")

        # Handle lto correctly
        if self.context.spec.pkg_optimize == "speed":
            self.define_export("AR", "gcc-ar")
            self.define_export("RANLIB", "gcc-ranlib")
            self.define_export("NM", "gcc-nm")

        if not console_ui.allow_colors:
            self.define_export("TERM", "dumb")

        # Mask display
        self.define_unexport("DISPLAY")
        # Mask sudo from anyone
        self.define_unexport("SUDO_USER")
        self.define_unexport("SUDO_GID")
        self.define_unexport("SUDO_UID")
        self.define_unexport("SUDO_COMMAND")
        self.define_unexport("CDPATH")

    def emit_exports(self):
        """ TODO: Grab known exports into an OrderedDict populated by an rc
            YAML file to allow easier manipulation """
        ret = []
        for key in self.exports:
            ret.append("export {}=\"{}\"".format(key, self.exports[key]))

        unset_line = "unset {} || :".format(" ".join(self.unexports.keys()))
        ret.append(unset_line)
        return ret

    def is_valid_macro_char(self, char):
        if char.isalpha() or char.isdigit():
            return True
        if char == "_":
            return True
        return False

    def escape_single(self, line):
        offset = line.find('%')
        if offset < 0:
            return (line, False)

        tmp_name = "%"
        tmp_idx = 0
        for i in xrange(offset + 1, len(line)):
            if line[i] == "%":
                tmp_name += "%"
                break
            if self.is_valid_macro_char(line[i]):
                tmp_name += line[i]
            else:
                break
        start = line[0:offset]
        remnant = line[offset + len(tmp_name):]
        # TODO: Change to is-valid-macro check and consume anyway
        if tmp_name in self.macros:
            mc = self.macros[tmp_name]
            if mc is None:
                mc = ""
            line = "%s%s%s" % (start, mc, remnant)
            return (line, True)
        else:
            line = "%s%s%s" % (start, tmp_name, remnant)
            return (line, False)

    def escape_string(self, input_string):
        """ Recursively escape our macros out of a string until no more of our
            macros appear in it """
        ret = []

        for line in input_string.split("\n"):
            while (True):
                (line, cont) = self.escape_single(line)
                if not cont:
                    ret.append(line)
                    break

        return "\n".join(ret)
Exemple #55
0
class UnitSampler(BaseChunkCollector):
    """Groups data-units based on `id_fname`, produces samples of data-units.

    Sequentially absorbs data-chunks until a data-unit with a different
    `id_fname` entry is encountered. In such case, it will start the sampling
    phase.

    If a group has more units than the maximum specified, than the
    specified number of units will be sampled to create each chunk until
    all units are used. Otherwise, sampling is performed only once.
    """
    
    def __init__(self, id_fname, min_units=None, max_units=None, sample_all=True):
        """
        :param id_fname: name of the field used to group data-units.
        :param min_units: if a batch or a group has fewer reviews than
            specified, it is discarded.
        :param max_units: the limit of reviews that each group in the chunk
            will have. When the limit is reached for a group, the other
            reviews are ignored.
        :param sample_all: if set to True, performs sampling of reviews until
            all are used; otherwise samples only once and drops the rest.
        """
        super(UnitSampler, self).__init__(max_size=1)
        self.id_fname = id_fname
        if min_units and max_units:
            assert min_units <= max_units
        self.min_units = min_units
        self.max_units = max_units
        self.sample_all_revs = sample_all

        self._coll = OrderedDict()
        self._prev_group_id = None

    def __len__(self):
        first_key = next(iter(self._coll.keys()))
        return len(self._coll[first_key])

    def absorb_and_yield_if_full(self, data_chunk):
        for indx in range(len(data_chunk)):
            group_id = data_chunk[indx, self.id_fname]

            if self._prev_group_id and group_id != self._prev_group_id:
                for chunk in self.yield_remaining():
                    yield chunk
                self.reset()
            self._prev_group_id = group_id

            if not len(self._coll):
                for fn in data_chunk.fnames:
                    if isinstance(data_chunk[fn], np.ndarray):
                        self._coll[fn] = np.array([], dtype=data_chunk[fn].dtype)
                    else:
                        self._coll[fn] = []

            for fn in data_chunk.fnames:
                val = data_chunk[indx, fn]
                if fn not in self._coll:
                    raise DataChunkError("Input chunks have different field "
                                         "names.")
                if isinstance(self._coll[fn], np.ndarray):
                    self._coll[fn] = np.append(self._coll[fn], val)
                else:
                    self._coll[fn].append(val)

    def yield_remaining(self):
        for chunk in self.compile_chunks():
            if self.min_units and len(chunk) < self.min_units:
                continue
            yield chunk

    def get_coll_len(self):
        if not len(self._coll):
            return 0
        first_key = next(iter(self._coll.keys()))
        return len(self._coll[first_key])

    def reset(self):
        self._coll = OrderedDict()
        self._prev_group_id = None

    def compile_chunks(self):
        """Compiles data-chunks filled with group sequences."""
        if self.max_units:
            while len(self):
                if len(self) > self.max_units:
                    sel_indxs = random.choice(range(len(self)), replace=False,
                                              size=self.max_units)
                else:
                    sel_indxs = range(len(self))

                # create an output data-chunk based on the selected units
                dc = DataChunk()
                for k, val in self._coll.items():
                    dc[k] = [val[indx] for indx in sel_indxs]
                    if isinstance(val, np.ndarray):
                        dc[k] = np.array(dc[k], dtype=val.dtype)
                yield dc

                # removing the selected indxs from the collector
                for indx in sorted(sel_indxs, reverse=True):
                    for fn in self._coll:
                        if isinstance(self._coll[fn], np.ndarray):
                            self._coll[fn] = np.delete(self._coll[fn], indx)
                        else:
                            del self._coll[fn][indx]

                # stop the cycle as one sample is already produced
                if not self.sample_all_revs:
                    break
        else:
            dc = DataChunk()
            for k, val in self._coll.items():
                dc[k] = val
            yield dc
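The loop in compile_chunks draws up to max_units indices without replacement, emits them as a chunk, removes them from the collector and repeats until the group is exhausted (or stops after one draw when sample_all is False). A standalone illustration of that loop, assuming the random referenced above is numpy.random (the replace=/size= keywords suggest it):

import numpy as np

group = list("abcdefg")  # stands in for one field of the collector
max_units = 3
while group:
    if len(group) > max_units:
        sel_indxs = np.random.choice(range(len(group)), replace=False, size=max_units)
    else:
        sel_indxs = range(len(group))
    print([group[i] for i in sel_indxs])          # one sampled chunk
    for indx in sorted(sel_indxs, reverse=True):  # drop the used units
        del group[indx]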
Exemple #56
0
class Graph(object):
    def __init__(self, name, dataset, log_level=logging.DEBUG):
        default_graph = self
        self.name = name
        self.dataset = dataset
        self.logger = logging.getLogger(name)
        self.logger.setLevel(log_level)
        self.tensor_registry = OrderedDict()
        self.tensor_id_counter = 0
        self.op_id_counter = 0
        self.op_registry = OrderedDict()
        self.op_type_counter = {}
        self.current_scope = ""
        self.scope_stack = deque([""])
        self.grad_dtype = FQDtype.FP32
        self.intermediate_dtype = FQDtype.FXP32

    def set_gradient_dtype(self, dtype):
        assert isinstance(dtype, Dtype)
        self.grad_dtype = dtype

    def get_dot(self):
        dot = Digraph()
        dot_node_dict = {}
        for opname, op in self.op_registry.items():
            dot_node_dict[op] = '{}\nshape={}\ndtype={}'.format(
                opname, op.output_tensors.shape, op.output_tensors.dtype)
        for opname, op in self.op_registry.items():
            is_sink = len(op.output_tensors.output_nodes) == 0
            if is_sink:
                dot.node(dot_node_dict[op], fillcolor='pink', style='filled')
            else:
                dot.node(dot_node_dict[op], fillcolor='cyan', style='filled')
            for t in op.input_tensors:
                if t.op is None:
                    tensor_name = '{}\nshape = {}\ndtype = {}'.format(
                        t.name, t.shape, t.dtype)
                    dot.node(tensor_name,
                             shape='rectangle',
                             fillcolor='gray',
                             style='filled')
                    dot.edge(tensor_name, dot_node_dict[op])
                else:
                    dot.edge(dot_node_dict[t.op], dot_node_dict[op])
        return dot

    def tensor(self, shape, name=None, dtype=None, trainable=True, data=None):
        assert shape is not None, shape
        assert isinstance(shape, tuple) or isinstance(shape, int)
        if isinstance(shape, list):
            shape = tuple(shape)
        if name is None:
            name = str(self.tensor_id_counter)
            self.tensor_id_counter += 1
        name = '{}{}'.format(self.current_scope, name)
        assert name not in self.tensor_registry.keys(
        ), 'Tensor with name {} already exists!'.format(name)
        t = Tensor(shape, name, data, dtype, trainable)
        self.tensor_registry[name] = t
        self.logger.debug('Created tensor {}'.format(t.__str__()))
        return t

    def register_tensor(self, t):
        assert t.name not in self.tensor_registry.keys()
        self.tensor_registry[t.name] = t

    def create_node(self, op):
        name = op.name
        name = '{}{}'.format(self.current_scope, name)
        op.name = name
        assert name not in self.op_registry, 'Op with name {} already exists!'.format(
            name)
        self.op_registry[name] = op

        for t in op.input_tensors:
            t.output_nodes.append(op)

        self.logger.debug('Created op {}'.format(op.name))
        return op.output_tensors

    def get_trainable_tensors(self):
        trainable_tensors = []
        for tname in self.tensor_registry.keys():
            t = self.tensor_registry[tname]
            if t.trainable:
                trainable_tensors.append(t)
        return tuple(trainable_tensors)

    def set_graph_context(self, c):
        self.graph_context = c

    def as_default(self):
        return _default_graph_stack.get_controller(self)

    def get_op_dependencies(self, tensor):
        if tensor.op is None:
            return tuple([])
        deps = [tensor.op]
        for t in tensor.op.input_tensors:
            if t.op is not None:
                for op in self.get_op_dependencies(t):
                    deps.append(op)
        return tuple(deps)

    def get_tensor_dependencies(self, tensor):
        tlist = []
        for op in self.get_op_dependencies(tensor):
            for t in op.input_tensors:
                tlist.append(t)
        return tuple(tlist)

    def get_op_name(self, name, op_type):
        if op_type not in self.op_type_counter:
            self.op_type_counter[op_type] = 0

        if name is None:
            op_count = self.op_type_counter[op_type]
            if op_count == 0:
                name = op_type
            else:
                name = '{}:{}'.format(op_type, self.op_type_counter[op_type])

        self.op_type_counter[op_type] += 1
        return name

    def get_ops(self):
        total_ops = {}
        for opname, op in self.op_registry.items():
            for op_type, num_ops in op.get_ops().items():
                if op_type not in total_ops:
                    total_ops[op_type] = 0
                total_ops[op_type] += num_ops
        return total_ops

    @contextmanager
    def name_scope(self, name):
        current_scope = self.current_scope
        current_op_type_counter = self.op_type_counter.copy()
        if self.current_scope == "":
            next_scope = '{}/'.format(name)
        else:
            next_scope = '{}{}/'.format(self.current_scope, name)
        try:
            self.op_type_counter = {}
            self.current_scope = next_scope
            yield
        finally:
            self.op_type_counter = current_op_type_counter
            self.current_scope = current_scope

    def print_ops(self):
        total_ops = {}
        g = self
        for key, op in g.op_registry.items():
            sub_ops = op.get_ops()
            if len(sub_ops.keys()) > 0:
                for op, num in sub_ops.items():
                    sopname = op.__str__()
                    if sopname not in total_ops:
                        total_ops[sopname] = num
                    else:
                        total_ops[sopname] += num

        print('*' * 100)
        for sop, num in total_ops.items():
            print('{:>80}: {:>20,}'.format(sop, num))

    def benchmark_tf(self,
                     phase='forward+backward',
                     csv_file='gpu_baseline.csv'):

        assert phase in ['forward', 'backward', 'forward+backward']

        if not os.path.exists(csv_file):
            gpu_df = pd.DataFrame(columns=[
                'Platform', 'Phase', 'Benchmark', 'Time Mean (sec)',
                'Time Standard Deviation (sec)', 'Power Mean (Watt)',
                'Power Standard Deviation (Watt)'
            ])
        else:
            gpu_df = pd.read_csv(csv_file)

        r = lookup_pandas_dataframe(gpu_df, {
            'Benchmark': self.name,
            'Phase': phase
        })

        if len(r) == 0:

            from polymath.codegen.dnnweavergen.dnnweaver2.tf_utils import get_tf_performance

            if phase == 'backward':
                print('backward')
                t_mn, t_sd, p_mn, p_sd = get_tf_performance(
                    self, 'forward+backward')
                f_t_mn, f_t_sd, f_p_mn, f_p_sd = get_tf_performance(
                    self, 'forward')
                t_mn -= f_t_mn
            elif phase == 'forward':
                print('forward')
                t_mn, t_sd, p_mn, p_sd = get_tf_performance(self, 'forward')
            else:
                print('forward+backward')
                t_mn, t_sd, p_mn, p_sd = get_tf_performance(
                    self, 'forward+backward')

            data = [['TitanXp', phase, self.name, t_mn, t_sd, p_mn, p_sd]]
            current_df = pd.DataFrame(data,
                                      columns=[
                                          'Platform', 'Phase', 'Benchmark',
                                          'Time Mean (sec)',
                                          'Time Standard Deviation (sec)',
                                          'Power Mean (Watt)',
                                          'Power Standard Deviation (Watt)'
                                      ])
            gpu_df = pd.concat([gpu_df, current_df], ignore_index=True)
            gpu_df.to_csv(csv_file, index=False)
        else:
            t_mn = float(r['Time Mean (sec)'])
            t_sd = float(r['Time Standard Deviation (sec)'])
            p_mn = float(r['Power Mean (Watt)'])
            p_sd = float(r['Power Standard Deviation (Watt)'])
        return t_mn, t_sd, p_mn, p_sd

    def load_params_from_pickle(self, pickle_filename):
        with open(pickle_filename, "rb") as h:
            if "2.7" in sys.version:
                params = pickle.load(h)
            elif "3.5" in sys.version:
                params = pickle.load(h, encoding='latin1')
            else:
                raise Exception("Unknown python version")

        for opname in params.keys():
            if opname in self.op_registry.keys():
                op = self.op_registry[opname]
                op.load_params(params[opname])
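
The name_scope / get_op_name pair in the Graph class above gives every op a per-type counter and a nested "a/b/" scope prefix (the prefix itself is applied in create_node and tensor). A distilled, standalone sketch of that naming scheme, folded into one hypothetical helper class for brevity:

from contextlib import contextmanager

class ScopedNamer:
    """Illustrative only: mirrors Graph's scope prefix and per-type op counters."""

    def __init__(self):
        self.current_scope = ""
        self.op_type_counter = {}

    @contextmanager
    def name_scope(self, name):
        saved_scope = self.current_scope
        saved_counter = self.op_type_counter.copy()
        self.current_scope = '{}{}/'.format(self.current_scope, name)
        self.op_type_counter = {}
        try:
            yield
        finally:
            self.current_scope = saved_scope
            self.op_type_counter = saved_counter

    def get_op_name(self, op_type):
        count = self.op_type_counter.get(op_type, 0)
        self.op_type_counter[op_type] = count + 1
        name = op_type if count == 0 else '{}:{}'.format(op_type, count)
        return self.current_scope + name

namer = ScopedNamer()
with namer.name_scope('block1'):
    print(namer.get_op_name('conv'))   # block1/conv
    print(namer.get_op_name('conv'))   # block1/conv:1
print(namer.get_op_name('conv'))       # conv (counters reset outside the scope)
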
Exemple #57
0
    def _recomputeItems(self):
        #Some globals that we can re-use.
        global_container_stack = Application.getInstance().getGlobalContainerStack()
        if global_container_stack is None:
            return

        # Get the list of extruders and place the selected extruder at the front of the list.
        extruder_manager = ExtruderManager.getInstance()
        active_extruder = extruder_manager.getActiveExtruderStack()
        extruder_stacks = extruder_manager.getActiveExtruderStacks()
        if active_extruder in extruder_stacks:
            extruder_stacks.remove(active_extruder)
            extruder_stacks = [active_extruder] + extruder_stacks
        # Get a list of available qualities for this machine and material
        qualities = QualityManager.getInstance().findAllUsableQualitiesForMachineAndExtruders(global_container_stack,
                                                                                              extruder_stacks)
        container_registry = ContainerRegistry.getInstance()
        machine_manager = Application.getInstance().getMachineManager()

        unit = global_container_stack.getBottom().getProperty("layer_height", "unit")
        if not unit:
            unit = ""

        # group all quality items according to quality_types, so we know which profile suits the currently
        # active machine and material, and later yield the right ones.
        tmp_all_quality_items = OrderedDict()
        for item in super()._recomputeItems():
            profile = container_registry.findContainers(id=item["id"])
            quality_type = profile[0].getMetaDataEntry("quality_type") if profile else ""

            if quality_type not in tmp_all_quality_items:
                tmp_all_quality_items[quality_type] = {"suitable_container": None, "all_containers": []}

            tmp_all_quality_items[quality_type]["all_containers"].append(item)
            if tmp_all_quality_items[quality_type]["suitable_container"] is None and profile[0] in qualities:
                tmp_all_quality_items[quality_type]["suitable_container"] = item

        # reverse the ordering (finest first, coarsest last)
        all_quality_items = OrderedDict()
        for key in reversed(tmp_all_quality_items.keys()):
            all_quality_items[key] = tmp_all_quality_items[key]

        for data_item in all_quality_items.values():
            item = data_item["suitable_container"]
            if item is None:
                item = data_item["all_containers"][0]

            profile = container_registry.findContainers(id = item["id"])
            if not profile:
                item["layer_height"] = ""  # Can't update a profile that is unknown.
                item["available"] = False
                yield item
                continue

            profile = profile[0]
            item["available"] = profile in qualities

            # Easy case: This profile defines its own layer height.
            if profile.hasProperty("layer_height", "value"):
                self._setItemLayerHeight(item, profile.getProperty("layer_height", "value"), unit)
                yield item
                continue

            # Quality-changes profile that has no value for layer height. Get the corresponding quality profile and ask that profile.
            quality_type = profile.getMetaDataEntry("quality_type", None)
            if quality_type:
                quality_results = machine_manager.determineQualityAndQualityChangesForQualityType(quality_type)
                for quality_result in quality_results:
                    if quality_result["stack"] is global_container_stack:
                        quality = quality_result["quality"]
                        break
                else: #No global container stack in the results:
                    if quality_results:
                        quality = quality_results[0]["quality"] #Take any of the extruders.
                    else:
                        quality = None
                if quality and quality.hasProperty("layer_height", "value"):
                    self._setItemLayerHeight(item, quality.getProperty("layer_height", "value"), unit)
                    yield item
                    continue

            #Quality has no value for layer height either. Get the layer height from somewhere lower in the stack.
            skip_until_container = global_container_stack.material
            if not skip_until_container or skip_until_container == ContainerRegistry.getInstance().getEmptyInstanceContainer(): #No material in stack.
                skip_until_container = global_container_stack.variant
                if not skip_until_container or skip_until_container == ContainerRegistry.getInstance().getEmptyInstanceContainer(): #No variant in stack.
                    skip_until_container = global_container_stack.getBottom()
            self._setItemLayerHeight(item, global_container_stack.getRawProperty("layer_height", "value", skip_until_container = skip_until_container.getId()), unit)  # Fall through to the currently loaded material.
            yield item
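
The grouping step in _recomputeItems collects every quality item under its quality_type, remembers the first container that suits the active machine, and then reverses the insertion order so the finest profile is listed first. A standalone sketch of that OrderedDict idiom on made-up plain-dict items (the field names here are illustrative):

from collections import OrderedDict

items = [
    {"id": "draft",  "quality_type": "draft",  "suitable": False},
    {"id": "normal", "quality_type": "normal", "suitable": True},
    {"id": "fine",   "quality_type": "fine",   "suitable": True},
]

grouped = OrderedDict()
for item in items:
    bucket = grouped.setdefault(item["quality_type"],
                                {"suitable_container": None, "all_containers": []})
    bucket["all_containers"].append(item)
    if bucket["suitable_container"] is None and item["suitable"]:
        bucket["suitable_container"] = item

# reverse the insertion order, as the method above does (finest first)
reordered = OrderedDict((k, grouped[k]) for k in reversed(grouped))
for quality_type, bucket in reordered.items():
    chosen = bucket["suitable_container"] or bucket["all_containers"][0]
    print(quality_type, chosen["id"])
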
Exemple #58
0
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True,
                    x_size=(512, 512),
                    debug=True):
    """
    Creates a new convolutional unet for the given parametrization.

    :param x: input tensor, shape [?,nx,ny,channels]
    :param keep_prob: dropout probability tensor
    :param channels: number of channels in the input image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    :param x_size: spatial size (height, width) of the input image
    :param debug: Flag to enable debug output
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))

    # Placeholder for the input image
    '''
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]
    '''
    in_node = x

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = np.array(x_size)
    size = in_size
    # down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features],
                    stddev,
                    name="w1")
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev,
                    name="w1")

            w2 = weight_variable(
                [filter_size, filter_size, features, features],
                stddev,
                name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")

            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv2)

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size -= 4
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size = size / 2

    mid_map = dw_h_convs[layers - 1]
    in_node = mid_map
    mid_shape = (size[0], size[1], features)

    weightds = []
    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))

            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features],
                stddev,
                name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat

            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2],
                stddev,
                name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev,
                name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")

            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.relu(conv2)
            up_h_convs[layer] = in_node

            weights.append((w1, w2))
            weightds.append((wd, bd))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
            size -= 4

    # Output Map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.relu(conv)
        up_h_convs["out"] = output_map

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i,
                                 get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i,
                                 get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    for wd, bd in weightds:
        variables.append(wd)
        variables.append(bd)

    variables.append(weight)
    variables.append(bias)

    return output_map, mid_map, variables, size.astype(int), mid_shape
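
The size bookkeeping in create_conv_net fixes how much smaller the output map is than the input: with the default filter_size=3 each level's two unpadded convolutions crop 4 pixels (2*(filter_size-1) in general), max pooling halves between down levels, and each up level doubles and crops again. A standalone sketch of that arithmetic under the same defaults; unet_valid_output_size is a hypothetical helper, not part of the original module:

import numpy as np

def unet_valid_output_size(in_size, layers=3, filter_size=3, pool_size=2):
    """Mirror the size updates performed in create_conv_net (valid convolutions)."""
    size = np.array(in_size, dtype=float)
    crop = 2 * (filter_size - 1)        # two unpadded convs per level
    for layer in range(layers):
        size -= crop
        if layer < layers - 1:
            size /= pool_size           # max pooling between down levels
    for _ in range(layers - 1):
        size *= pool_size               # transposed convolution between up levels
        size -= crop
    return size.astype(int)

print(unet_valid_output_size((512, 512)))   # [472 472] with the defaults above
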
def fabricate_stems(tree_settings: TreeSettings, addsplinetobone, addstem,
                    baseSize, childP, cu, leafDist, leaves, leafType, n,
                    scaleVal, storeN, boneStep):

    #prevent baseSize from going to 1.0
    baseSize = min(0.999, baseSize)

    # Store the old rotation to allow new stems to be rotated away from the previous one.
    oldRotate = 0

    #use fancy child point selection / rotation
    if (n == 1) and (tree_settings.rMode != "original"):
        childP_T = OrderedDict()
        childP_L = []
        for i, p in enumerate(childP):
            if p.offset == 1:
                childP_L.append(p)
            else:
                p.index = i
                if p.offset not in childP_T:
                    childP_T[p.offset] = [p]
                else:
                    childP_T[p.offset].append(p)

        childP_T = [childP_T[k] for k in sorted(childP_T.keys())]

        childP = []
        rot_a = []
        for p in childP_T:
            if tree_settings.rMode == "rotate":
                if tree_settings.rotate[n] < 0.0:
                    oldRotate = -copysign(tree_settings.rotate[n], oldRotate)
                else:
                    oldRotate += tree_settings.rotate[n]
                bRotate = oldRotate + uniform(-tree_settings.rotateV[n],
                                              tree_settings.rotateV[n])

                #find center of split branches
                #average
                cx = sum([a.co[0] for a in p]) / len(p)
                cy = sum([a.co[1] for a in p]) / len(p)
                #center of range
                #xc = [a.co[0] for a in p]
                #yc = [a.co[1] for a in p]
                #cx = (max(xc) + min(xc)) / 2
                #cy = (max(yc) + min(yc)) / 2

                center = Vector((cx, cy, 0))
                center2 = Vector((-cx, cy))

                #choose start point whose angle is closest to the rotate angle
                a1 = bRotate % tau
                a_diff = []
                for a in p:
                    a = a.co
                    a = a - center
                    a2 = atan2(a[0], -a[1])
                    d = min((a1 - a2 + tau) % tau, (a2 - a1 + tau) % tau)
                    a_diff.append(d)

                idx = a_diff.index(min(a_diff))

                #find branch end point

                br = p[idx]
                b = br.co
                vx = sin(bRotate)
                vy = cos(bRotate)
                v = Vector((vx, vy))

                bD = ((b[0] * b[0] + b[1] * b[1])**.5)

                #account for length
                bL = br.lengthPar * tree_settings.length[1] * shape_ratio(
                    tree_settings.shape, (1 - br.offset) / (1 - baseSize),
                    custom=tree_settings.customShape)

                #account for down angle
                if tree_settings.downAngleV[1] > 0:
                    downA = tree_settings.downAngle[n] + (
                        -tree_settings.downAngleV[n] * (1 - (1 - br.offset) /
                                                        (1 - baseSize))**2)
                else:
                    downA = tree_settings.downAngle[n]
                if downA < (.5 * pi):
                    downA = sin(downA)**2
                    bL *= downA

                bL *= 0.33  #adjustment constant value
                v *= (bD + bL)  #branch end point

                #find actual rotate angle from branch location
                bv = Vector((b[0], -b[1]))
                cv = v - bv - center2
                a = atan2(cv[0], cv[1])

                childP.append(p[idx])
                rot_a.append(a)

            elif tree_settings.rMode == 'distance':
                for i, br in enumerate(p):
                    rotV = tree_settings.rotateV[n] * .5
                    bRotate = tree_settings.rotate[n] * br.index
                    bL = br.lengthPar * tree_settings.length[1] * shape_ratio(
                        tree_settings.shape,
                        (1 - br.stemOffset) / (1 - baseSize),
                        custom=tree_settings.customShape)
                    if tree_settings.downAngleV[1] > 0:
                        downA = tree_settings.downAngle[n] + (
                            -tree_settings.downAngleV[n] *
                            (1 - (1 - br.stemOffset) / (1 - baseSize))**2)
                    else:
                        downA = tree_settings.downAngle[n]

                    downRotMat = Matrix.Rotation(downA, 3, 'X')
                    rotMat = Matrix.Rotation(bRotate, 3, 'Z')

                    bVec = zAxis.copy()
                    bVec.rotate(downRotMat)
                    bVec.rotate(rotMat)
                    bVec.rotate(convertQuat(br.quat))
                    bVec *= bL
                    p1 = bVec + br.co

                    #distance to other branches
                    isIntersect = []
                    for branch in p:
                        p2 = branch.co
                        p3 = p2 - p1
                        l = p3.length * uniform(1.0, 1.1)
                        bL = branch.lengthPar * tree_settings.length[
                            1] * shape_ratio(tree_settings.shape,
                                             (1 - branch.stemOffset) /
                                             (1 - baseSize),
                                             custom=tree_settings.customShape)
                        isIntersect.append(l < bL)

                    del isIntersect[i]

                    if not any(isIntersect):
                        childP.append(br)
                        rot_a.append(bRotate + uniform(-rotV, rotV))

            else:
                idx = randint(0, len(p) - 1)
                childP.append(p[idx])

        childP.extend(childP_L)
        rot_a.extend([0] * len(childP_L))

        oldRotate = 0

    for i, p in enumerate(childP):
        # Add a spline and set the coordinate of the first point.
        newSpline = cu.splines.new('BEZIER')
        newSpline.material_index = tree_settings.matIndex[n]
        #cu.resolution_u = resU
        newPoint = newSpline.bezier_points[-1]
        newPoint.co = p.co
        tempPos = zAxis.copy()
        # If the -ve flag for downAngle is used we need a special formula to find it
        if tree_settings.useOldDownAngle:
            if tree_settings.downAngleV[n] < 0.0:
                downV = tree_settings.downAngleV[n] * (1 - 2 *
                                                       (.2 + .8 *
                                                        ((1 - p.offset) /
                                                         (1 - baseSize))))
            # Otherwise just find a random value
            else:
                downV = uniform(-tree_settings.downAngleV[n],
                                tree_settings.downAngleV[n])
        else:
            if tree_settings.downAngleV[n] < 0.0:
                downV = uniform(-tree_settings.downAngleV[n],
                                tree_settings.downAngleV[n])
            else:
                downV = -tree_settings.downAngleV[n] * (
                    1 - (1 - p.stemOffset) /
                    (1 - baseSize))**2  #(110, 80) = (60, -50)

        if p.offset == 1:
            downRotMat = Matrix.Rotation(0, 3, 'X')
        else:
            downRotMat = Matrix.Rotation(tree_settings.downAngle[n] + downV, 3,
                                         'X')

        # If the -ve flag for rotate is used we need to find which side of the stem the last child point was and then grow in the opposite direction.
        if tree_settings.rotate[n] < 0.0:
            oldRotate = -copysign(tree_settings.rotate[n], oldRotate)
        # Otherwise just generate a random number in the specified range
        else:
            oldRotate += tree_settings.rotate[n]
        bRotate = oldRotate + uniform(-tree_settings.rotateV[n],
                                      tree_settings.rotateV[n])

        if (n == 1) and (tree_settings.rMode in ["rotate", 'distance']):
            bRotate = rot_a[i]

        rotMat = Matrix.Rotation(bRotate, 3, 'Z')

        # Rotate the direction of growth and set the new point coordinates
        tempPos.rotate(downRotMat)
        tempPos.rotate(rotMat)

        #use quat angle
        if (n == 1) and (p.offset != 1):
            if tree_settings.useParentAngle:
                tempPos.rotate(convertQuat(p.quat))
        else:
            tempPos.rotate(p.quat)

        newPoint.handle_right = p.co + tempPos * 0.33

        # Find branch length and the number of child stems.
        maxbL = scaleVal
        for l in tree_settings.length[:n + 1]:
            maxbL *= l
        lMax = tree_settings.length[n]  # * uniform(1 - lenV, 1 + lenV)
        if n == 1:
            lShape = shape_ratio(tree_settings.shape,
                                 (1 - p.stemOffset) / (1 - baseSize),
                                 custom=tree_settings.customShape)
        else:
            lShape = shape_ratio(tree_settings.shapeS,
                                 (1 - p.stemOffset) / (1 - baseSize))
        branchL = p.lengthPar * lMax * lShape
        childStems = tree_settings.branches[min(
            3, n + 1)] * (0.1 + 0.9 * (branchL / maxbL))

        # If this is the last level before leaves then we need to generate the child points differently
        if (storeN == tree_settings.levels - 1):
            if leafType == '4':
                childStems = 0  #False
            else:
                childStems = leaves * (0.1 + 0.9 *
                                       (branchL / maxbL)) * shape_ratio(
                                           leafDist, (1 - p.offset))

        #print("n=%d, levels=%d, n'=%d, childStems=%s"%(n, levels, storeN, childStems))

        # Determine the starting and ending radii of the stem using the tapering of the stem
        #startRad = min((p.radiusPar[0] * ((branchL / p.lengthPar) ** ratioPower)) * radiusTweak[n], 10)
        ratio = (p.radiusPar[0] - p.radiusPar[2]) / p.lengthPar
        startRad = min(((ratio * branchL)**tree_settings.ratioPower) *
                       tree_settings.radiusTweak[n],
                       p.radiusPar[1])  #p.radiusPar[1] #10
        #startRad = min((ratio * p.lengthPar * ((branchL / p.lengthPar) ** ratioPower)) * radiusTweak[n], 10)#p.radiusPar[1]
        #p.radiusPar[2] is parent end radius
        if p.offset == 1:
            startRad = p.radiusPar[1]
        endRad = (startRad *
                  (1 - tree_settings.taper[n]))**tree_settings.ratioPower
        startRad = max(startRad, tree_settings.minRadius)
        endRad = max(endRad, tree_settings.minRadius)
        newPoint.radius = startRad

        # stem curvature
        curveVal = tree_settings.curve[n] / tree_settings.curveRes[n]
        curveVar = tree_settings.curveV[n] / tree_settings.curveRes[n]

        #curveVal = curveVal * (branchL / scaleVal)

        # Add the new stem to list of stems to grow and define which bone it will be parented to
        nstem = StemSpline(newSpline, curveVal, curveVar,
                           tree_settings.attractUp[n], 0,
                           tree_settings.curveRes[n],
                           branchL / tree_settings.curveRes[n], childStems,
                           startRad, endRad,
                           len(cu.splines) - 1, 0, p.quat)
        if (n == 1) and (p.offset == 1):
            nstem.isFirstTip = True
        addstem(nstem)

        bone = roundBone(p.parBone, boneStep[n - 1])
        if p.offset == 1:
            isend = True
        else:
            isend = False
        addsplinetobone((bone, isend))
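
One reusable idiom in the "rotate" branch of fabricate_stems is picking the candidate point whose polar angle around the branch-cluster centre is closest to the desired rotation, with the difference measured on the circle (modulo tau). A standalone sketch of that selection on made-up 2-D points; closest_by_angle is an illustrative name, and the axis convention (atan2(x, -y)) follows the code above:

from math import atan2, tau

def closest_by_angle(points, center, target_angle):
    """Return the (x, y) point whose angle around center is closest to target_angle."""
    a1 = target_angle % tau
    diffs = []
    for x, y in points:
        a2 = atan2(x - center[0], -(y - center[1]))
        diffs.append(min((a1 - a2) % tau, (a2 - a1) % tau))
    return points[diffs.index(min(diffs))]

pts = [(1.0, 0.0), (0.0, 1.0), (-1.0, 0.0), (0.0, -1.0)]
print(closest_by_angle(pts, center=(0.0, 0.0), target_angle=0.1))
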
Exemple #60
0
    val = c[letters]
    per = round((val / float(length)) * 100, 3)
    L.append(per)
    print "Frequency of " + letters + " is: " + str(val) + " - ", str(per)
    alpha_percentage[letters] = per

try:
    alpha_percentage = OrderedDict(sorted(alpha_percentage.items()))
    plt.figure(figsize=(18, 7))
    rects = plt.bar(range(len(alpha_percentage)),
                    alpha_percentage.values(),
                    width=0.7,
                    color='r',
                    align='center',
                    edgecolor='b')
    plt.xticks(range(len(alpha_percentage)), alpha_percentage.keys())
    plt.yticks(range(int(max(L) + 3)))
    plt.title('Frequency Of Letters')
    plt.ylabel('Frequency')
    plt.xlabel('Letters')
    plt.grid()
    #rects = plt.patches
    for rect, label in zip(rects, alpha_percentage.values()):
        height = rect.get_height()
        #print height
        plt.text(rect.get_x() + rect.get_width() / 2,
                 height + 0.3,
                 str(round(label, 2)),
                 ha='center',
                 va='bottom',
                 color='blue',