Example #1
    def __init__(self, *args, **kwargs):
        """ initialize a dataset from a set of objects of varying dimensions

        data  : dict of DimArrays or list of named DimArrays or Axes object
        keys  : keys to order data if provided as dict, or to name data if list
        """
        assert not {'axes','keys'}.issubset(kwargs.keys()) # just to check bugs due to back-compat ==> TO BE REMOVED AFTER DEBUGGING

        # check input arguments: same init as odict
        data = odict(*args, **kwargs)

        # Basic initialization
        #self._axes = Axes()
        self._axes = DatasetAxes(self)
        self._attrs = odict()

        # initialize an ordered dictionary
        super(Dataset, self).__init__()
        #self.data = odict()

        values = list(data.values())
        keys = list(data.keys())

        # Check everything is a DimArray
        #for key, value in zip(keys, values):
        for i, key in enumerate(keys):
            if not isinstance(values[i], DimArray):
                values[i] = self._constructor(values[i])

        # Align objects
        values = align_axes(values)

        # Append object (will automatically update self.axes)
        for key, value in zip(keys, values):
            self[key] = value
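The constructor defers its argument handling to the `odict(*args, **kwargs)` call, so Dataset accepts whatever odict accepts. A minimal sketch, assuming `odict` is `collections.OrderedDict` imported under that alias (the import is not shown in these snippets):

from collections import OrderedDict as odict

# From a list of (key, value) pairs: insertion order is preserved.
d1 = odict([('temp', [1, 2, 3]), ('precip', [4, 5, 6])])

# From keyword arguments (argument order is preserved on Python 3.6+).
d2 = odict(temp=[1, 2, 3], precip=[4, 5, 6])

assert list(d1.keys()) == ['temp', 'precip']
assert d1 == d2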
Example #2
    def transform(self, data, path ):
        debug( "%s: called" % str(self))

        if len(list(data.keys())) < 2:
            raise ValueError( "expected at least two arrays, got only %s." % str(list(data.keys())) )

        pairs = itertools.combinations( list(data.keys()), 2)

        new_data = odict()

        for x in list(data.keys()): new_data[x] = odict()
        
        for x,y in pairs:
            xvals, yvals = data[x], data[y]
            if self.paired:
                if len(xvals) != len(yvals):
                    raise ValueError("expected to arrays of the same length, %i != %i" % (len(xvals),
                                                                                          len(yvals)))
                take = [i for i in range(len(xvals)) if xvals[i] != None and yvals[i] != None \
                            and type(xvals[i]) in (float,int,int) and type(yvals[i]) in (float,int,int) ]
                xvals = [xvals[i] for i in take ]
                yvals = [yvals[i] for i in take ]

            try:
                result = self.apply( xvals, yvals )
            except ValueError as msg:
                warn( "pairwise computation failed: %s" % msg)
                continue

            new_data[x][y] = result

        return new_data
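The method returns a two-level mapping keyed first by the left array name, then by the right one. A self-contained sketch of that shape, with a dummy statistic standing in for self.apply and `odict` assumed to be collections.OrderedDict:

import itertools
from collections import OrderedDict as odict

data = {'a': [1, 2, 3], 'b': [2, 4, 6], 'c': [1, 1, 1]}

new_data = odict((x, odict()) for x in data)
for x, y in itertools.combinations(list(data.keys()), 2):
    # dummy statistic in place of self.apply(xvals, yvals)
    new_data[x][y] = sum(p * q for p, q in zip(data[x], data[y]))

assert new_data['a']['b'] == 28
assert 'a' not in new_data['b']   # combinations() yields each unordered pair once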
Example #3
    def transform(self, data, path):
        debug( "%s: called" % str(self))

        if len(data) == 0: return data
        
        keys = list(data.keys())

        if self.labels: 
            labels = data[self.labels]
            del keys[keys.index(self.labels)]
            if len(keys) < 1: 
                raise ValueError( "TransformerToLabels requires at least two arrays, got only 1, if tf-labels is set" )
        else: 
            max_nkeys = max([len(x) for x in list(data.values()) ])
            labels = list(range(1, max_nkeys + 1))

        labels = list(map(str, labels))

        if len(data) == 2:
            new_data = odict(list(zip(labels, data[keys[0]])))
        else:
            new_data = odict()
            for key in keys:
                new_data[key] = odict(list(zip(labels, data[key])))
                
        return new_data
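One detail of the odict(zip(labels, ...)) construction above: zip truncates to the shorter input, so surplus labels or values are dropped silently rather than raising. A small illustration (`odict` assumed to be collections.OrderedDict):

from collections import OrderedDict as odict

labels = ['1', '2', '3']
values = [10.0, 20.0]

row = odict(zip(labels, values))
assert row == odict([('1', 10.0), ('2', 20.0)])   # label '3' is silently dropped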
Example #4
    def __call__(self, track, slice=None):

        v = int(track[-1]) * int(slice[-1])
        if slice == "slice1":
            return odict((("column1", v), ("column2", v * 2)))
        elif slice == "slice2":
            return odict((("column1", v), ("column2", v * 2), ("column3", v * 3)))
Example #5
def build_tree(reader):
    global messages
    tree = odict()
    row_groups = odict()
    _, row = next_line(reader)
    accessibility_case_id = None
    while True:
        if NUMERIC_ID.match(row[EXPRESSION]):
            accessibility_case_id = row[EXPRESSION]
            row_groups[accessibility_case_id] = []
        elif len(row[EXPRESSION]) > 0:
            row_groups[accessibility_case_id].append(row)
        try:
            lineno, row = next_line(reader)
            row.append("lineno: %s" % lineno)
        except StopIteration:
            break
    for acid, rows in row_groups.items():
        rows = [['(']] + rows + [[')']]
        it = iter(rows)
        row = next(it)
        tree[acid] = build_expression(it, row, depth=0)
    for acid, expression in tree.items():
        rescope(expression, 'messages')
        rescope(expression, 'flags')
    for acid, expression in tree.items():
        gather_messages(expression)
    return tree, messages
Example #6
    def setUp(self):

        spec_OBO_1_1_0 = (
            b'<spectrum id="spectrum=1019" index="8" defaultArrayLength="431">'
        )
        spec_OBO_1_1_0 = b'<spectrum id="scan=3" index="0" sourceFileRef="SF1" defaultArrayLength="92">'
        spec_OBO_1_0_0 = (
            b'<spectrum index="317" id="S318" nativeID="318" defaultArrayLength="34">'
        )
        spec_OBO_0_99_1 = b'<spectrum id="S20" scanNumber="20" msLevel="2">'

        chro_OBO_1_1_0 = ""
        chro_OBO_1_1_0 = ""
        chro_OBO_1_0_0 = ""
        chro_OBO_0_99_1 = ""

        self.spec_tags = odict(
            [
                ("OBO_1_1_0", spec_OBO_1_1_0),
                ("OBO_1_1_0", spec_OBO_1_1_0),
                ("OBO_1_0_0", spec_OBO_1_0_0),
                ("OBO_0_99_1", spec_OBO_0_99_1),
            ]
        )

        self.chro_tags = odict(
            [
                ("OBO_1_1_0", chro_OBO_1_1_0),
                ("OBO_1_1_0", chro_OBO_1_1_0),
                ("OBO_1_0_0", chro_OBO_1_0_0),
                ("OBO_0_99_1", chro_OBO_0_99_1),
            ]
        )
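Note that the pair lists passed to odict above repeat the "OBO_1_1_0" key; when an ordered dict is built from such a list, the later pair overwrites the earlier one, so only one entry per key survives. A quick check (`odict` assumed to be collections.OrderedDict):

from collections import OrderedDict as odict

tags = odict([('OBO_1_1_0', 'first'),
              ('OBO_1_1_0', 'second'),
              ('OBO_1_0_0', 'third')])

assert len(tags) == 2
assert tags['OBO_1_1_0'] == 'second'   # the later pair wins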
Example #7
    def __call__(self, track, *args ):
        """count number of entries in a table."""

        config = configparser.ConfigParser()
        config.readfp(open(track, "r"))

        result = odict()

        def convert( value ):
            '''convert a value to int, float or str.'''
            rx_int = re.compile(r"^\s*[+-]*[0-9]+\s*$")
            rx_float = re.compile(r"^\s*[+-]*[0-9.]+[.+\-eE][+-]*[0-9.]*\s*$")

            if value is None: return value

            if rx_int.match( value ):
                return int(value), "int"
            elif rx_float.match( value ):
                return float(value), "float"
            return value, "string"

        for section in config.sections():
            x = odict()
            for key,value in config.items( section ):
                x[key] = odict( list(zip( ("value", "type" ), convert( value ))) )
            result[section] = x
        
        return result
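The nested convert helper classifies raw config strings by regex before casting. A standalone version of the same classification with expected results, using raw strings for the patterns:

import re

rx_int = re.compile(r"^\s*[+-]*[0-9]+\s*$")
rx_float = re.compile(r"^\s*[+-]*[0-9.]+[.+\-eE][+-]*[0-9.]*\s*$")

def convert(value):
    if value is None:
        return value
    if rx_int.match(value):
        return int(value), "int"
    elif rx_float.match(value):
        return float(value), "float"
    return value, "string"

assert convert("12") == (12, "int")
assert convert(" -3 ") == (-3, "int")
assert convert("2.5") == (2.5, "float")
assert convert("1e-4") == (0.0001, "float")
assert convert("abc") == ("abc", "string")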
Example #8
    def __init__(self, data):
        # convert file to string
        try:
            data = data.read()
        except AttributeError:
            # try opening the string as a file and reading it
            try:
                with open(data, 'rb') as datafile:
                    data = datafile.read()
            except (IOError, TypeError):
                pass

        # convert string to dict
        try:
            data = self.import_from_json(data)
        except TypeError:
            pass

        # read data from dict
        self.nouns = odict((nid, Noun(ndata)) for nid, ndata in data.get('nouns', {}).iteritems())
        self.rooms = odict((rid, Room(rdata)) for rid, rdata in data.get('rooms', {}).iteritems())
        self.vars = odict((var, int(value)) for var, value in data.get('vars', {}).iteritems())
        self.messages = data.get('messages', {})
        self.lexicon = Lexicon(data.get('words', []))
        self.controls = odict((stage, ([self.parse_control(cdata) for cdata in scontrols]
                                       if not isinstance(scontrols, (basestring, dict))
                                       else [self.parse_control(scontrols)]))
                              for stage, scontrols in data.get('controls', {}).iteritems())

        self.validate()
Example #9
 def __init__(self, base_folder=None, likelihoods=None):
     fullnames = odict([["commander", "commander_v4.1_lm49.clik"],
                        ["camspec",   "CAMspec_v6.2TN_2013_02_26_dist.clik"],
                        ["lowlike",   "lowlike_v222.clik"]])
     likelihoods_fullnames = []
     if likelihoods:
         for lik in likelihoods:
             if lik.lower() in fullnames:
                 likelihoods_fullnames.append(fullnames[lik.lower()])
             else:
                 raise ValueError("Likelihood name not recognised: %s.\n"%lik+ 
                                  "Valid ones are "+str(fullnames.keys()))
         self._likelihoods_names = likelihoods_fullnames
     else:
         self._likelihoods_names = fullnames.values()
     # Initialize!
     self._likelihoods = odict()
     for lik in self._likelihoods_names:
         full_path = os.path.join(base_folder, lik)
         try:
             self._likelihoods[lik] = clik.clik(full_path)
         except clik.lkl.CError:
             raise ValueError("'clik' failed to initialise the requested "+
                              "likelihood %s"%lik+", probably because it was"+
                              " not found on the given folder: '%s'"%full_path)
     # Get nuisance parameters
     self._nuisance_parameters = dict([lik_name,{}]
                                      for lik_name in self._likelihoods_names)
     for lik in self._likelihoods_names:
         names = self._likelihoods[lik].extra_parameter_names
         self._nuisance_parameters[lik] = ({} if not names else
             odict([[pname,None] for pname in names]))
Example #10
def import_B737_H2():
    
    hrm_geometry_filename = 'Vehicles/CAD/adl_B737-H2/xsec_adl_B737-H2.hrm'
    cad_geometry_filename = 'Vehicles/CAD/adl_B737-H2/geom_adl_B737-H2.sldprt'
    
    components = read_hrm(hrm_geometry_filename)
    
    # trim wing sections
    isecs_1 = range(0,5+1,1)
    isecs_2 = range(5,11,1) + range(11,47,6) + range(47,56,2) + range(56,98,6) + range(98,105+1,1)
    wing_1 = odict()
    wing_2 = odict()
    for i in isecs_1:
        key = components['Wing'].keys()[i]
        wing_1[key] = components['Wing'][key]
    for i in isecs_2:
        key = components['Wing'].keys()[i]
        wing_2[key] = components['Wing'][key]        
    components['Wing_1'] = wing_1
    components['Wing_2'] = wing_2
        
        
    
    SW = pySolidWorks.SolidWorks()
    SW.visible()
    SW_Model = SW.new_part()
    #SW_Model = SW.open_model(cad_geometry_filename)
    
    
    cad_component(components,'Wing_1',SW_Model,True)
    cad_component(components,'Wing_2',SW_Model,True)
    cad_component(components,'Tails',SW_Model,True)
    cad_component(components,'fuselage',SW_Model)
Example #11
def filters(input_file,md,out_file,cthresh=100.0,rthresh=100.0,order='column',delimit=','):
	col_headers = 0
	valdict = odict()
	with open(input_file,'r') as filer:
		counter = 0
		for line in filer:
			if counter == 0:
				col_headers = tuple(i for i in line.rstrip('\r\n').split(delimit)[1:])
				counter += 1
			else:
				init_parse = line.rstrip('\r\n').split(delimit)
				valdict[init_parse[0]] = odict([tuple([col_headers[d],i]) for d,i in enumerate(init_parse[1:])])
	final_dict = odict()
	if order == 'column':
		first_rem = [key for key in col_headers if (len([md for key1,value1 in valdict.iteritems() if value1[key] == md])/float(len(valdict))) *100 < cthresh]
		for k,v in valdict.iteritems():
			if (len([md for ke,va in v.iteritems() if ke in first_rem and va == md])/float(len(first_rem))) *100 < rthresh:
				final_dict[k] = odict([tuple([k1,v1]) for k1,v1 in v.iteritems() if k1 in first_rem])
	elif order == 'row':
		first_rem = [ke for ke,va in valdict.iteritems() if (va.values().count(md)/float(len(col_headers))) *100 < rthresh]
		col_rem = [key for key in col_headers if (len([md for key1,value1 in valdict.iteritems() if value1[key] == md and key1 in first_rem])/float(len(first_rem))) *100 < cthresh]
		for k,v in valdict.iteritems():
			if k in first_rem:
				final_dict[k] = odict([tuple([k1,v1]) for k1,v1 in v.iteritems() if k1 in col_rem])
	with open(out_file,'w') as outter:
		col_order = final_dict.values()[0].keys()
		outter.write(delimit + delimit.join(col_order)+'\n')
		for kw,vw in final_dict.iteritems():
			outter.write(kw+delimit+delimit.join([vw[t]for t in col_order])+'\n')
Example #12
    def read_info(self, framedir, name, split=None):
        """Read information about file contents without reading the data.

        Information is a dictionary containing as a minimum the shape and
        type.
        """
        fn = os.path.join(framedir, name + '.ulm')
        if split is None or os.path.exists(fn):
            f = ulmopen(fn, 'r')
            info = odict()
            info['shape'] = f.shape
            info['type'] = f.dtype
            info['stored_as'] = f.stored_as
            info['identical'] = f.all_identical
            f.close()
            return info
        else:
            info = odict()
            for i in range(split):
                fn = os.path.join(framedir, name + '_' + str(i) + '.ulm')
                f = ulmopen(fn, 'r')
                if i == 0:
                    info['shape'] = list(f.shape)
                    info['type'] = f.dtype
                    info['stored_as'] = f.stored_as
                    info['identical'] = f.all_identical
                else:
                    info['shape'][0] += f.shape[0]
                    assert info['type'] == f.dtype
                    info['identical'] = info['identical'] and f.all_identical
                f.close()
            info['shape'] = tuple(info['shape'])
            return info
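In the split branch, the per-file shapes are merged by summing along the first axis while the remaining axes (and the dtype) must agree. A small worked illustration with made-up shapes:

shapes = [(100, 3), (100, 3), (56, 3)]

combined = list(shapes[0])
for s in shapes[1:]:
    assert s[1:] == tuple(combined[1:])   # trailing axes must match
    combined[0] += s[0]

assert tuple(combined) == (256, 3)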
Example #13
 def read_info(self, framedir, name, split=None):
     "Read information about file contents without reading the data."
     fn = os.path.join(framedir, name + '.pickle')
     if split is None or os.path.exists(fn):
         f = open(fn, 'rb')
         if self.readpy2:
             info = pickle.load(f, encoding='latin1')
         else:
             info = pickle.load(f)
         f.close()
         result = odict()
         result['shape'] = info[0]
         result['type'] = info[1]
         return result
     else:
         for i in range(split):
             fn = os.path.join(framedir, name + '_' + str(i) + '.pickle')
             f = open(fn, 'rb')
             if self.readpy2:
                 info = pickle.load(f, encoding='latin1')
             else:
                 info = pickle.load(f)
             f.close()
             if i == 0:
                 shape = list(info[0])
                 dtype = info[1]
             else:
                 shape[0] += info[0][0]
                 assert dtype == info[1]
         result = odict()
         result['shape'] = tuple(shape)
         result['type'] = dtype
         return result
Example #14
  def __init__(self, molecule, **kwargs):
    PlanewaveInput.__init__(self, molecule, **kwargs)
    self.setting.update(**kwargs)
    if 'pp_theory' not in kwargs:
      self.setting['pp_theory'] = self.setting['theory']
    self.backup()

    mode_dict = {
      'single_point': 'scf',
    }

    self.content = odict()

    mode = mode_dict[self.setting['mode']]
    self.content['control'] = odict([
      ('calculation', mode),
      ('pseudo_dir', './'),
    ])
    self.content['system'] = odict([
      ('ibrav', 0),
      ('ecutwfc', self.setting['cutoff']),
    ])
    self.content['electrons'] = odict([
      ('electron_maxstep', self.setting['scf_step']),
    ])
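The nested odicts keep the sections and keys in declaration order, which matters if self.content is later serialized into an input deck. A sketch of how such a mapping might be rendered as Fortran-style namelists (an assumption about the eventual output format; only the mapping itself is built above):

from collections import OrderedDict as odict

content = odict([
    ('control', odict([('calculation', 'scf'), ('pseudo_dir', './')])),
    ('system', odict([('ibrav', 0), ('ecutwfc', 40)])),
])

for section, params in content.items():
    print('&%s' % section)
    for key, value in params.items():
        print('  %s = %s' % (key, value))
    print('/')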
Example #15
    def collect( self ):
        '''collect all data.

        Data is stored in a multi-level dictionary (DataTree)
        '''

        self.data = odict()

        self.debug( "%s: collecting data paths." % (self.tracker))        
        is_function, datapaths = self.getDataPaths(self.tracker)
        self.debug( "%s: collected data paths." % (self.tracker))        

        # if function, no datapaths
        if is_function:
            d = self.getData( () )

            # save in data tree as leaf
            DataTree.setLeaf( self.data, ("all",), d )

            self.debug( "%s: collecting data finished for function." % (self.tracker))
            return

        # if no tracks, error
        if len(datapaths) == 0 or len(datapaths[0]) == 0:
            self.warn( "%s: no tracks found - no output" % self.tracker )
            return

        self.debug( "%s: filtering data paths." % (self.tracker))        
        # filter data paths
        datapaths = self.filterDataPaths( datapaths )
        self.debug( "%s: filtered data paths." % (self.tracker))        

        # if no tracks, error
        if len(datapaths) == 0 or len(datapaths[0]) == 0:
            self.warn( "%s: no tracks remain after filtering - no output" % self.tracker )
            return

        self.debug( "%s: building all_paths" % (self.tracker ) )
        if len(datapaths) > MAX_PATH_NESTING:
            self.warn( "%s: number of nesting in data paths too large: %i" % (self.tracker, len(datapaths)))
            raise ValueError( "%s: number of nesting in data paths too large: %i" % (self.tracker, len(datapaths)))

        all_paths = list(itertools.product( *datapaths ))
        self.debug( "%s: collecting data started for %i data paths" % (self.tracker, 
                                                                       len( all_paths) ) )

        self.data = odict()
        for path in all_paths:

            d = self.getData( path )

            # ignore empty data sets
            if d is None: continue

            # save in data tree as leaf
            DataTree.setLeaf( self.data, path, d )

        self.debug( "%s: collecting data finished for %i data paths" % (self.tracker, 
                                                                       len( all_paths) ) )
        return self.data
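The data paths are expanded into every combination with itertools.product before data is fetched. A small illustration of that expansion:

import itertools

datapaths = [('track1', 'track2'), ('sliceA', 'sliceB')]
all_paths = list(itertools.product(*datapaths))

assert all_paths == [('track1', 'sliceA'), ('track1', 'sliceB'),
                     ('track2', 'sliceA'), ('track2', 'sliceB')]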
Example #16
	def __init__(self, hostname):
		self.hostname = hostname
		self.ip_loopback = None
		self.log_elements = list()
		self.chassis_info = dict()
		self.power_feeds = dict()
		self.fan_trays = dict()
		self.cf3_free = dict()
		self.system_info = dict()
		self.snmp_info = dict()
		self.cpu_idle = None
		self.ntp_server = dict()
		self.sync_if_timing = None
		self.timos_version = None
		self.bof_address = None
		self.telnet_session = dict()
		self.tacplus_auths = dict()
		self.syslog_servers = dict()
		self.snmp_traps = dict()
		
		self.valid_toreport = 0
		self.optical_ports = dict()
		self.checkslist = dict()
		self.card_details = dict()
		self.mda_details = dict()
		self.card_states = list()
		
		self.sap_using = dict()
		self.service_using = dict()
		self.service_config = odict()
		self.sap_config = odict()
Example #17
def fromCache(cache, tracks=None, slices=None, groupby="slice"):
    """return a data tree from cache"""

    data = DataTree()
    keys = [x.split("/") for x in list(cache.keys())]

    if tracks == None:
        tracks = set([x[0] for x in keys])
    else:
        tracks = tracks.split(",")

    if slices == None:
        slices = set([x[1] for x in keys if len(x) > 1])
    else:
        slices = slices.split(",")

    def tokey(track, slice):
        return "/".join((track, slice))

    if not slices:
        for track in tracks:
            data[track] = cache[track]
    elif groupby == "slice" or groupby == "all":
        for slice in slices:
            data[slice] = odict()
            for track in tracks:
                data[slice][track] = cache[tokey(track, slice)]
    elif groupby == "track":
        for track in tracks:
            data[track] = odict()
            for slice in slices:
                data[track][slice] = cache[tokey(track, slice)]
    return data
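A self-contained sketch of the groupby="slice" branch, with a plain dict standing in for the cache and `odict` assumed to be collections.OrderedDict:

from collections import OrderedDict as odict

cache = {'t1/s1': 1, 't1/s2': 2, 't2/s1': 3, 't2/s2': 4}
tracks, slices = ['t1', 't2'], ['s1', 's2']

by_slice = odict()
for s in slices:
    by_slice[s] = odict((t, cache['%s/%s' % (t, s)]) for t in tracks)

assert by_slice['s1'] == {'t1': 1, 't2': 3}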
Example #18
def group_datasets(data, grp_REs):
    dsets = odict() # dsets: meaning further grouping, based on which
                    # plotting will be done

    # structure of dsets: dict of dict of dict ...
    # dset = {
    #     'dset0': {
    #         'groupkey0': ('mean0', 'std0'),
    #         'groupkey1': ('mean1', 'std1'),
    #         ...
    #         },
    #     'dset1': {
    #         'groupkey0': ('mean0', 'std0'),
    #         'groupkey1': ('mean1', 'std1'),
    #         ...
    #         },
    #     ...
    #     }
        
    for c, RE in enumerate(grp_REs):
        dsetk = 'dset{0}'.format(c)                   # k means key
        _ = dsets[dsetk] = odict()
        for key in data.keys():
            if re.search(RE, key):
                _.update({key:data[key]})
    return dsets
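A self-contained run of the same regex grouping on toy data (hypothetical keys and patterns, `odict` assumed to be collections.OrderedDict):

import re
from collections import OrderedDict as odict

data = {'run1_mean': 1.0, 'run2_mean': 2.0, 'run1_std': 0.1}
grp_REs = [r'_mean$', r'_std$']

dsets = odict()
for c, RE in enumerate(grp_REs):
    dsets['dset{0}'.format(c)] = odict(
        (key, data[key]) for key in data if re.search(RE, key))

assert sorted(dsets['dset0']) == ['run1_mean', 'run2_mean']
assert list(dsets['dset1']) == ['run1_std']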
Example #19
def _read_salt2(f, **kwargs):
    """Read a new-style SALT2 file.

    Such a file has metadata on lines starting with '@' and column names
    on lines starting with '#' and containing a ':' after the column name.
    There is optionally a line containing '#end' before the start of data.
    """

    meta = odict()
    colnames = []
    cols = []
    readingdata = False
    for line in f:

        # strip leading & trailing whitespace & newline
        line = line.strip()
        if len(line) == 0:
            continue

        if not readingdata:
            # Read metadata
            if line[0] == '@':
                pos = line.find(' ')  # Find first space.
                if pos in [-1, 1]:  # Space must exist and key must exist.
                    raise ValueError('Incorrectly formatted metadata line: ' +
                                     line)
                meta[line[1:pos]] = _cast_str(line[pos:])
                continue

            # Read header line
            if line[0] == '#':
                pos = line.find(':')
                if pos in [-1, 1]:
                    continue  # comment line
                colname = line[1:pos].strip()
                if colname == 'end':
                    continue
                colnames.append(colname)
                cols.append([])
                continue

            # If the first non-whitespace character is not '@' or '#',
            # assume the line is the first data line.
            readingdata = True

        # strip comments
        pos = line.find('#')
        if pos > -1:
            line = line[:pos]
        if len(line) == 0:
            continue

        # Now we're reading data
        items = line.split()
        for col, item in zip(cols, items):
            col.append(_cast_str(item))

    data = odict(zip(colnames, cols))

    return meta, data
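A hypothetical input in the format the docstring describes, together with the result the parser would produce (assuming _cast_str, which is not shown, casts numeric strings and strips whitespace):

import io

sample = io.StringIO(
    "@SURVEY SDSS\n"
    "@Z_HELIO 0.01\n"
    "#time :\n"
    "#flux :\n"
    "#end\n"
    "55148.0 3.5\n"
    "55149.0 3.6  # trailing comment\n"
)

# meta, data = _read_salt2(sample)
# Expected:
#   meta -> {'SURVEY': 'SDSS', 'Z_HELIO': 0.01}
#   data -> {'time': [55148.0, 55149.0], 'flux': [3.5, 3.6]}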
Example #20
    def __init__(self, aggression):
        # Do not allow to create AIstate instances with an invalid version number.
        if not hasattr(AIstate, 'version'):
            raise ConversionError("AIstate must have an integer version attribute for savegame compatibility")
        if not isinstance(AIstate.version, int):
            raise ConversionError("Version attribute of AIstate must be an integer!")
        if AIstate.version < 0:
            raise ConversionError("AIstate savegame compatibility version must be a positive integer!")
            
        # need to store the version explicitly as the class variable "version" is only stored in the 
        # self.__class__.__dict__ while we only pickle the object (i.e. self.__dict__ )
        self.version = AIstate.version
        
        # Debug info
        # unique id for game
        self.uid = self.generate_uid(first=True)
        # unique ids for turns.  {turn: uid}
        self.turn_uids = {}

        self._aggression = aggression

        # 'global' (?) variables
        self.colonisablePlanetIDs = odict()
        self.colonisableOutpostIDs = odict()  #
        self.__aiMissionsByFleetID = {}
        self.__shipRoleByDesignID = {}
        self.__fleetRoleByID = {}
        self.diplomatic_logs = {}
        self.__priorityByType = {}

        # initialize home system knowledge
        universe = fo.getUniverse()
        empire = fo.getEmpire()
        self.empireID = empire.empireID
        homeworld = universe.getPlanet(empire.capitalID)
        self.__origin_home_system_id = homeworld.systemID if homeworld else INVALID_ID
        self.visBorderSystemIDs = {self.__origin_home_system_id}
        self.visInteriorSystemIDs = set()
        self.exploredSystemIDs = set()
        self.unexploredSystemIDs = {self.__origin_home_system_id}
        self.fleetStatus = {}  # keys: 'sysID', 'nships', 'rating'
        # systemStatus keys: 'name', 'neighbors' (sysIDs), '2jump_ring' (sysIDs), '3jump_ring', '4jump_ring', 'enemy_ship_count'
        # 'fleetThreat', 'planetThreat', 'monsterThreat' (specifically, immobile nonplanet threat), 'totalThreat', 'localEnemyFleetIDs',
        # 'neighborThreat', 'max_neighbor_threat', 'jump2_threat' (up to 2 jumps away), 'jump3_threat', 'jump4_threat', 'regional_threat'
        # 'myDefenses' (planet rating), 'myfleets', 'myFleetsAccessible' (not just next destination), 'myFleetRating'
        # 'my_neighbor_rating' (up to 1 jump away), 'my_jump2_rating', 'my_jump3_rating', my_jump4_rating'
        # 'local_fleet_threats', 'regional_fleet_threats' <== these are only for mobile fleet threats
        self.systemStatus = {}
        self.needsEmergencyExploration = []
        self.newlySplitFleets = {}
        self.militaryRating = 0
        self.shipCount = 4
        self.misc = {}
        self.qualifyingColonyBaseTargets = {}
        self.qualifyingOutpostBaseTargets = {}
        self.qualifyingTroopBaseTargets = {}
        self.__empire_standard_enemy = CombatRatingsAI.default_ship_stats().get_stats(hashable=True)  # TODO: track on a per-empire basis
        self.empire_standard_enemy_rating = 0  # TODO: track on a per-empire basis
        self.character = create_character(aggression, self.empireID)
Example #21
  def query(self, q, collection=None):
    """Retrieve the color palette associated with a query term (optionally filtered to a
       single collection).

       q is a string present in the list returned by self.queries()
       collection (if present) is a collection name found in self.collections

       returns a (collection-name, colors-dict) tuple containing the name of the collection
       where the query term was found and a nested dictionary with the mixture parameters
       for the color scheme. The scheme's dict structure is of the form:

        {
          "green":{
            "weight": 0.01,
            "shades":{
              "neutral": 0.1,
              "fresh": 0.2,
              "warm": 0.3,
            }
          },
          "blue":{...},
          ...
        }

    """
    with self.cursor as c:
      collections=self._collections

      c.execute("""SELECT * FROM queries WHERE name=?""", [q])
      if collection:
        for query in c.fetchall():
          # prefer a match in the specified collection
          if query['collection'] == collections[collection]: break
        else:
          # not found in preferred collection
          return collection, {}
      else:
        # if collection is None, search in all the cached collections
        query = c.fetchone()
        if not query:
          # didn't find the query string in any of them
          return collection, {}

      found_in = dict(zip(collections.values(), collections.keys()))[query['collection']]
      c.execute("""SELECT color.name AS color, shade.name AS shade, mixture.weight
                   FROM weights AS mixture
                   LEFT JOIN words AS color ON mixture.color = color.id
                   LEFT JOIN words AS shade ON mixture.shade = shade.id
                   WHERE query=? ORDER BY shade ASC, weight DESC""", [query['id']])
      colors = odict()
      for row in c.fetchall():
        clr = colors.get(row['color'], dict(weight=0, shades=odict()))
        if not row['shade']:
          clr['weight'] = row['weight']
        else:
          clr['shades'][row['shade']] = row['weight']
        if row['color'] not in colors:
          colors[row['color']] = clr
      return found_in, colors
Example #22
 def __call__(self, track, slice = None):
     if slice == "slice1":
         return odict( (("column1", 10),
                        ("column2", 20 ),) )
     elif slice == "slice2":
         return odict ( (("column1", 20),
                         ("column2", 10),
                         ("column3", 5),) )
Example #23
def Fluidigm_parser(infile,outfile,altn=None,keep=None,delimiter=',',mkeep=None,four_state=False,geno_map=False):
	if mkeep == None:
		marker_var = []
	else:
		marker_var = mkeep
	with open(infile, 'r') as inputter:
		reader = inputter.readlines()
		reffer = [d for d,k in enumerate(reader) if k.find('Chip Barcode') != -1] # This marks where the data begins.
		inputter_init = [i.split(delimiter) for i in reader[reffer[0] + 1:]]
	if marker_var != []:
		#bin_lister = [i[11].split(':')[0]+i[11].split(':')[1] if i[11].find(':') != -1 else 'ZZ' for i in inputter_init if i[3] in marker_var] #This gets the actual genotype for the geno_map option. Not currently allowed to be selected by user.
		#bin_list variable is a list of lists with the first index being the Assay column, the second is the XY genotype, and the third is the Sample Name.
		bin_list = [[i[3],list(i[10])[0],list(i[10])[1],i[6]] if all([True if z == 'X' or z == 'Y' else False for z in i[10]]) else [i[3],'Z','Z',i[6]] for i in inputter_init if i[3] in marker_var]	
	else:
		#bin_lister = [i[11].split(':')[0]+i[11].split(':')[1] if i[11].find(':') != -1 else 'ZZ' for i in inputter_init]
		#bin_list variable is a list of lists with the first index being the Assay column, the second is the XY genotype, and the third is the Sample Name.
		bin_list = [[i[3],list(i[10])[0],list(i[10])[1],i[6]] if all([True if z == 'X' or z == 'Y' else False for z in i[10]]) else [i[3],'Z','Z',i[6]] for i in inputter_init]	
	bin_dic = odict()
	#if geno_map:
		#geno_dict = odict()
	for d,i in enumerate(bin_list):
		if keep == None:
			if i[-1] not in bin_dic:
				bin_dic[i[-1]] = odict([(i[0],i[1]+i[2])])
				#if geno_map:
					#geno_dict[i[-1]]=odict([i[0],(i[1]+i[2],bin_lister[d])])
			else:
				if i[0] in bin_dic[i[-1]]:
					raise SystemError('Duplicate Marker: ' + i[0])
				else:
					bin_dic[i[-1]].update(odict([(i[0],i[1]+i[2])]))
				#if geno_map:
					#geno_dict[i[-1]].update(odict([i[0],(i[1]+i[2],bin_lister[d])]))			
		else:
			if i[-1] in keep:
				if i[-1] not in bin_dic:
					bin_dic[i[-1]] = odict([(i[0],i[1]+i[2])])				
					#if geno_map:
						#geno_dict[i[-1]]=odict([i[0],(i[1]+i[2],bin_lister[d])])
				else:
					if i[0] in bin_dic[i[-1]]:
						raise SystemError('Duplicate Marker: ' + i[-1],i[0])
					else:
						bin_dic[i[-1]].update(odict([(i[0],i[1]+i[2])]))
				#if geno_map:
					#geno_dict[i[-1]].update(odict([i[0],(i[1]+i[2],bin_lister[d])]))	
	marker_order = list(bin_dic.values())[0].keys()
	if four_state:
		convert_dic = {'XX':'1', 'YY':'2', 'XY':'3','YX':'3','ZZ':'0'}
	else:
		convert_dic = {'XX':'1', 'YY':'2', 'XY':'3','YX':'3','ZZ':'3'}
	with open(outfile+'.csv','w') as outter:
		outter.write(','+','.join(marker_order)+'\n')
		for key, value in bin_dic.items():
			if altn == None:
				outter.write(key+','+','.join([convert_dic[value[e]] for e in marker_order])+'\n')
			else:
				outter.write(altn[key]+','+','.join([convert_dic[value[e]] for e in marker_order])+'\n')
Example #24
	def __new__(meta, name, bases, attrs):
		"""Gather known attributes together, preserving order, and transfer attribute names to them."""
		
		# Short-circuit this logic on the root "Element" class, as it can have no attributes.
		if len(bases) == 1 and bases[0] is object:
			attrs['__attributes__'] = odict()
			return type.__new__(meta, str(name), bases, attrs)
		
		attributes = odict()
		overridden_sequence = dict()
		fixups = []
		
		# Gather the parent classes that participate in our protocol.
		for base in bases:
			if hasattr(base, '__attributes__'):
				attributes.update(base.__attributes__)
		
		# To allow for hardcoding of Attributes we eliminate keys that have been redefined.
		# They might get added back later, of course.
		for k in attrs:
			if k in attributes:
				overridden_sequence[k] = attributes[k].__sequence__
				attributes.pop(k, None)
		
		def process(name, attr):
			"""Process attributes that are Element subclass instances."""
			
			# If no name has been defined, define it declaratively.
			if not getattr(attr, '__name__', None):
				attr.__name__ = name
			
			# If this attribute existed previously, clone the sequence number to preserve order.
			if name in overridden_sequence:
				attr.__sequence__ = overridden_sequence[name]
			
			# We give attributes a chance to perform additional work.
			if hasattr(attr, '__fixup__'):
				fixups.append(attr)  # Record the attribute to prevent __get__ transformation later.
			
			return name, attr
		
		# Iteratively process the Element subclass instances and update their definition.
		attributes.update(process(k, v) for k, v in attrs.items() if isinstance(v, Element))
		attrs['__attributes__'] = odict(sorted(attributes.items(), key=lambda t: t[1].__sequence__))
		
		# Construct the new class.
		cls = type.__new__(meta, str(name), bases, attrs)
		
		# Allow the class to be notified of its own construction.  Do not ask how this avoids creating black holes.
		if hasattr(cls, '__attributed__'):
			cls.__attributed__()
		
		# We do this now to allow mutation on the completed class.
		for obj in fixups:
			obj.__fixup__(cls)
		
		return cls
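The final ordering step sorts the gathered attributes by their __sequence__ counter before storing them in __attributes__. A minimal sketch of that re-ordering with a stand-in attribute class (the real Element/Attribute machinery is assumed, not shown):

from collections import OrderedDict as odict

class Attr(object):
    _counter = 0
    def __init__(self):
        Attr._counter += 1
        self.__sequence__ = Attr._counter

first, second, third = Attr(), Attr(), Attr()
attributes = {'c': third, 'a': first, 'b': second}

ordered = odict(sorted(attributes.items(), key=lambda t: t[1].__sequence__))
assert list(ordered) == ['a', 'b', 'c']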
Example #25
def _write_json(f, data, meta, **kwargs):

    # Build a dictionary of pure-python objects
    output = odict([('meta', meta),
                    ('data', odict())])
    for key in data.dtype.names:
        output['data'][key] = data[key].tolist()
    json.dump(output, f)
    del output
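The ordered dict matters here because json.dump writes keys in iteration order, so 'meta' reliably precedes 'data' in the output file. A quick check (`odict` assumed to be collections.OrderedDict):

import json
from collections import OrderedDict as odict

output = odict([('meta', {'survey': 'test'}),
                ('data', odict([('time', [1, 2]), ('flux', [3.0, 4.0])]))])

assert json.dumps(output) == (
    '{"meta": {"survey": "test"}, "data": {"time": [1, 2], "flux": [3.0, 4.0]}}'
)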
Example #26
    def __call__(self, track, slice):

        if slice == "slice1":
            return odict((("column1", randint(0,100)),
                          ("column2", randint(0,100)),))
        elif slice == "slice2":
            return odict((("column1", randint(0,100)),
                          ("column2", randint(0,100)),
                          ("column3", randint(0,100)),))
Example #27
    def write(self, outfile):
        """
        Save the likelihood results as a sparse HEALPix map.
        """
        data = odict()
        data['PIXEL']=self.roi.pixels_target
        # Full data output (too large for survey)
        if self.config['scan']['full_pdf']:
            data['LOG_LIKELIHOOD']=self.log_likelihood_sparse_array.T
            data['RICHNESS']=self.richness_sparse_array.T
            data['RICHNESS_LOWER']=self.richness_lower_sparse_array.T
            data['RICHNESS_UPPER']=self.richness_upper_sparse_array.T
            data['RICHNESS_LIMIT']=self.richness_upper_limit_sparse_array.T
            #data['STELLAR_MASS']=self.stellar_mass_sparse_array.T
            data['FRACTION_OBSERVABLE']=self.fraction_observable_sparse_array.T
        else:
            data['LOG_LIKELIHOOD']=self.log_likelihood_sparse_array.T
            data['RICHNESS']=self.richness_sparse_array.T
            data['FRACTION_OBSERVABLE']=self.fraction_observable_sparse_array.T

        # Convert to 32bit float
        for k in list(data.keys())[1:]:
            data[k] = data[k].astype('f4',copy=False)
            
        # Stellar mass can be calculated from STELLAR * RICHNESS
        header = odict()
        header['STELLAR']=round(self.stellar_mass_conversion,8)
        header['LKDNSIDE']=self.config['coords']['nside_likelihood']
        header['LKDPIX']=ang2pix(self.config['coords']['nside_likelihood'],
                                 self.roi.lon,self.roi.lat)
        header['NROI']=self.roi.inROI(self.loglike.catalog_roi.lon,
                                      self.loglike.catalog_roi.lat).sum()
        header['NANNULUS']=self.roi.inAnnulus(self.loglike.catalog_roi.lon,
                                              self.loglike.catalog_roi.lat).sum()
        header['NINSIDE']=self.roi.inInterior(self.loglike.catalog_roi.lon,
                                              self.loglike.catalog_roi.lat).sum()
        header['NTARGET']=self.roi.inTarget(self.loglike.catalog_roi.lon,
                                            self.loglike.catalog_roi.lat).sum()

        # Flatten if there is only a single distance modulus
        # ADW: Is this really what we want to do?
        if len(self.distance_modulus_array) == 1:
            for key in data:
                data[key] = data[key].flatten()

        logger.info("Writing %s..."%outfile)
        write_partial_map(outfile,data,
                          nside=self.config['coords']['nside_pixel'],
                          header=header,
                          clobber=True
                          )
        
        fitsio.write(outfile,
                     dict(DISTANCE_MODULUS=self.distance_modulus_array.astype('f4',copy=False)),
                     extname='DISTANCE_MODULUS',
                     clobber=False)
Example #28
def grp_datasets(data, pt_dd):
    grp_REs = pt_dd['grp_REs']
    dsets = odict()
    for c, RE in enumerate(grp_REs):
        dsetk = 'dset{0}'.format(c)
        _ = dsets[dsetk] = odict()
        for key in data.keys():
            if re.search(RE, key):
                _[key] = data[key]
    return dsets
Example #29
	def _backup_originals(self):
		self._originals_ = odict()
		for fieldname, fieldtype in self._fields_:
			original = getattr(self, fieldname)
			if isinstance(original, _Pointer) and issubclass(original._type_, MethodMapping):
				self._originals_[fieldname] = odict()
				# noinspection PyUnresolvedReferences
				for field in original.contents.fieldnames:
					self._originals_[fieldname][field] = getattr(original.contents, field)
			else:
				self._originals_[fieldname] = original
Example #30
	def test_operator_range(self):
		op = Queryable.range(mock_queryable, 5, 11)
		assert isinstance(op, Ops)
		
		assert op.as_query == odict({'field_name': odict([('$gte', 5), ('$lt', 11)])})
		
		if __debug__:
			a = MockQueryable()
			a.__disallowed_operators__ = {'#range'}
			
			with pytest.raises(NotImplementedError):
				Queryable.range(a, 5, 11)
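The comparison above pits two odicts against each other, which is the strict case: OrderedDict equality is order-sensitive only when both sides are OrderedDicts. A short demonstration:

from collections import OrderedDict as odict

a = odict([('$gte', 5), ('$lt', 11)])
b = odict([('$lt', 11), ('$gte', 5)])

assert a == dict(b)   # against a plain dict, order is ignored
assert a != b         # against another OrderedDict, order matters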
Example #31
    def _executeQuery(self, command, args=None):
        '''execute the provided command on the database. args are specified as a dictionary.
        error checking on the results is performed here and the returned value is a list of
        lists with each list representing a row of the returned table. '''

        try:
            argument_string = "&".join(["%s=%s" % (x, args[x]) for x in args])
            command_string = "&".join([command, argument_string])
        except TypeError:
            command_string = command

        query = "%s?cmd=%s" % (self.url, command_string)

        if len(query) > 2048:
            if "gene_list" in args:
                genes = args['gene_list'].split(",")

                if len(genes) < 2:
                    raise ValueError("Request too long")

                args['gene_list'] = ",".join(genes[(len(genes) / 2):])
                query1 = self._executeQuery(command, args)
                warnings = self.last_warnings

                args['gene_list'] = ",".join(genes[:(len(genes) / 2)])
                query2 = self._executeQuery(command, args)
                self.last_warnings = warnings + self.last_warnings

                return query1 + query2

        data = urllib2.urlopen(query)

        line = data.readline()
        self.last_query = query
        self.last_status = line
        self.last_warnings = []
        self.last_header = [self.last_status]
        return_table = []

        while re.match("^#", line):

            if re.match("^# Warning: (.+)", line):
                self.last_warnings.append(
                    re.match("^# Warning: (.+)", line).groups(1)[0])
                self.last_header.append(line)
                line = data.readline()
                continue

            elif re.match("^#", line):
                self.last_header.append(line)
                line = data.readline()
                continue

        if re.match("^Error: (.+)", line):
            self.last_header.append(line)
            raise CDGSError(re.match("^Error: (.+)", line).groups(1)[0], query)
        line = line.strip()
        headers = line.split("\t")

        for line in data:
            if re.match("^# Warning: (.+)", line):
                self.last_warnings.append(
                    re.match("^# Warning: (.+)", line).groups(1)[0])
                self.last_header.append(line)
                line = data.readline()
                continue
            line = line.strip()
            return_table.append(odict(zip(headers, line.split("\t"))))

        return return_table
Example #32
 def __preprocessQHints(self, hints):
     val = hints.get("$orderby")
     if isinstance(val, list):
         hints["$orderby"] = odict(val)
     return hints
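The conversion exists to keep the sort-key precedence that the list form expresses. A small illustration with a hypothetical $orderby value (`odict` assumed to be collections.OrderedDict):

from collections import OrderedDict as odict

hints = {"$orderby": [("age", -1), ("name", 1)]}   # sort by age desc, then name asc
hints["$orderby"] = odict(hints["$orderby"])

assert list(hints["$orderby"].items()) == [("age", -1), ("name", 1)]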
Example #33
class Scheduler(object):
    """
    Deal with survey scheduling.
    """
    _defaults = odict([
        ('tactician', 'coverage'),
        ('windows', os.path.join(fileio.get_datadir(),
                                 "maglites-windows.csv")),
        ('targets',
         os.path.join(fileio.get_datadir(), "maglites-target-fields.csv")),
    ])
    FieldType = FieldArray

    def __init__(self,
                 target_fields=None,
                 windows=None,
                 completed_fields=None):
        self.load_target_fields(target_fields)
        self.load_windows(windows)
        self.load_observed_fields()
        self.load_completed_fields(completed_fields)

        self.scheduled_fields = self.FieldType()
        self.observatory = CTIO()

    def load_target_fields(self, target_fields=None):
        if target_fields is None:
            target_fields = self._defaults['targets']

        if isinstance(target_fields, basestring):
            self.target_fields = self.FieldType.read(target_fields)
        else:
            self.target_fields = self.FieldType(target_fields)
        return self.target_fields

    def load_windows(self, windows=None):
        """
        Load the set of start and stop times for the observation windows.
        """
        if windows is None:
            windows = self._defaults['windows']
            logging.info("Setting default observing windows:\n %s" % windows)

        if isinstance(windows, basestring):
            windows = fileio.csv2rec(windows)

        self.windows = []
        for start, end in windows:
            self.windows.append([ephem.Date(start), ephem.Date(end)])

        # Sanity check that observation windows are properly sorted
        for ii, (start, end) in enumerate(self.windows):
            msg = 'Observation windows are not properly sorted\n'
            msg += '%s: %s -- %s' % (get_nite(start), datestr(start),
                                     datestr(end))
            if (end < start):
                logging.warn(msg)
            if ii > 0 and (start < self.windows[ii - 1][1]):
                logging.warn(msg)

        logging.info('Observation Windows:')
        for start, end in self.windows:
            logging.info('  %s: %s UTC -- %s UTC' %
                         (get_nite(start), datestr(start), datestr(end)))
        logging.info(30 * '-')

    def load_observed_fields(self):
        """
        Load fields from the telemetry database that were already observed.
        """
        try:
            fields = self.FieldType.load_database()
        except Exception as e:
            logging.warn("Failed to load completed exposures from database")
            logging.info(e)
            fields = self.FieldType()
        self.observed_fields = fields
        return self.observed_fields

    def load_completed_fields(self, completed_fields=None):
        """Load completed fields. The default behavior is to load the
        observed_fields as completed_fields. However, if the string
        'None' is passed then return an empty FieldArray.

        Parameters:
        -----------
        completed_fields : Filename, list of filenames, or FieldArray-type object.

        Returns:
        --------
        fields           : FieldArray of the completed fields
        """
        # Deal with 'None' string
        if isinstance(completed_fields, list):
            if completed_fields[0].lower() == 'none':
                self.completed_fields = self.FieldType()
                return self.completed_fields
        elif isinstance(completed_fields, basestring):
            if completed_fields.lower() == 'none':
                self.completed_fields = self.FieldType()
                return self.completed_fields

        self.completed_fields = copy.deepcopy(self.observed_fields)

        if not completed_fields:
            return self.completed_fields

        if isinstance(completed_fields, basestring):
            completed_fields = [completed_fields]

        if isinstance(completed_fields, list):
            fields = self.FieldType()
            for filename in completed_fields:
                fields = fields + self.FieldType.read(filename)

            completed_fields = fields

        new = ~np.in1d(completed_fields.unique_id,
                       self.completed_fields.unique_id)
        new_fields = completed_fields[new]
        self.completed_fields = self.completed_fields + new_fields
        return self.completed_fields

    def create_tactician(self, tactician=None):
        if tactician is None: tactician = self._defaults['tactician']
        return tactician_factory(tactician, mode=tactician)

    def select_field(self, date, mode='coverage'):
        """
        Select field(s) using the survey tactician.

        Parameters:
        -----------
        date       : ephem.Date object
        mode       : Type of tactician to use for selecting field

        Returns:
        --------
        field      : selected field(s) from tactician
        """
        sel = ~np.in1d(self.target_fields['ID'], self.completed_fields['ID'])

        self.tactician = self.create_tactician(mode)
        self.tactician.set_date(date)
        self.tactician.set_target_fields(self.target_fields[sel])
        self.tactician.set_completed_fields(self.completed_fields)

        field_select = self.tactician.select_fields()

        logging.debug(str(field_select))

        # For diagnostic purposes
        if False and len(self.scheduled_fields) % 10 == 0:
            weight = self.tactician.weight
            ortho.plotWeight(field_select[-1], self.target_fields,
                             self.tactician.weight)
            raw_input('WAIT')

        if len(field_select) == 0:
            logging.error("No field selected... we've got problems.")
            msg = "date=%s\n" % (datestr(date))
            msg += "index_select=%s, index=%s\n" % (index_select, index)
            msg += "nselected=%s, selection=%s\n" % (cut.sum(),
                                                     cut[index_select])
            msg += "weights=%s" % weight
            logging.info(msg)
            #ortho.plotWeight(self.scheduled_fields[-1], self.target_fields, self.tactician.weight)
            ortho.plotField(self.scheduled_fields[-1],
                            self.scheduled_fields,
                            options_basemap=dict(date='2017/02/20 05:00:00'))
            raw_input('WAIT')
            import pdb
            pdb.set_trace()
            raise Exception()

        return field_select

    def run(self,
            tstart=None,
            tstop=None,
            clip=False,
            plot=False,
            mode='coverage'):
        """
        Schedule a chunk of exposures. This is the loop where date is incremented

        Parameters:
        -----------
        tstart : Chunk start time
        tstop  : Chunk end time (may be replaced with chunk length)
        plot   : Plot the chunk (may be removed)

        Returns:
        --------
        fields : Scheduled fields
        """
        # Reset the scheduled fields
        self.scheduled_fields = self.FieldType()

        # If no tstop, run for 90 minutes
        timedelta = 90 * ephem.minute
        if tstart is None: tstart = ephem.now()
        if tstop is None: tstop = tstart + timedelta

        # Convert strings into dates
        if isinstance(tstart, basestring):
            tstart = ephem.Date(tstart)
        if isinstance(tstop, basestring):
            tstop = ephem.Date(tstop)

        msg = "Run start: %s\n" % datestr(tstart, 4)
        msg += "Run end: %s\n" % datestr(tstop, 4)
        msg += "Run time: %s minutes" % (timedelta / ephem.minute)
        logging.debug(msg)

        msg = "Previously completed fields: %i" % len(self.completed_fields)
        logging.info(msg)

        msg = "Scheduling with tactician: %s" % mode
        logging.info(msg)

        date = tstart
        latch = True
        while latch:
            logging.debug(' ' + datestr(date, 4))

            # Check to see if in valid observation window
            if self.windows is not None:
                inside = False
                for window in self.windows:
                    if date >= window[0] and date < window[-1]:
                        inside = True
                        break

                if not inside:
                    if clip:
                        break
                    else:
                        msg = 'Date outside of nominal observing windows'
                        logging.warning(msg)

            # Select one (or more) fields from the tactician
            field_select = self.select_field(date, mode)

            # Now update the time from the selected field
            date = ephem.Date(field_select[-1]['DATE']) + constants.FIELDTIME

            self.completed_fields = self.completed_fields + field_select
            self.scheduled_fields = self.scheduled_fields + field_select

            msg = " %(DATE).19s: id=%(ID)10s, secz=%(AIRMASS).2f, slew=%(SLEW).2f"
            msg += ", moon=%(PHASE).0f%%,%(ALT).0fdeg"
            for i, f in zip(field_select.unique_id, field_select):
                params = dict([('ID', i)] + [(k, f[k]) for k in f.dtype.names])
                params.update({
                    'PHASE': self.tactician.moon.phase,
                    "ALT": np.degrees(self.tactician.moon.alt)
                })
                logging.info(msg % params)

            #if plot: self.plotField(date, field_select)
            if plot:
                ortho.plotField(field_select[:-1], self.target_fields,
                                self.completed_fields)
            if date >= tstop: break

        msg = "Newly scheduled fields: %i" % len(self.scheduled_fields)
        logging.info(msg)

        return self.scheduled_fields

    def schedule_field(self,
                       hex,
                       tiling,
                       band=None,
                       date=None,
                       plot=False,
                       mode=None):
        """
        Schedule a single field at a given time.

        Parameters:
        -----------
        hex    : the hex ID of the field
        tiling : the tiling number of the field
        band   : The band of the field
        date   : The date/time for observation
        plot   : Plot the output
        mode   : Mode for scheduler tactician

        Returns:
        --------
        field : The scheduled field
        """
        # Probably cleaner to make this its own tactician
        date = ephem.Date(date) if date else ephem.now()

        select = (self.target_fields['HEX'] == hex)
        select &= (self.target_fields['TILING'] == tiling)
        if band is not None:
            select &= (self.target_fields['FILTER'] == band)
        index = np.nonzero(select)[0]

        field = self.target_fields[select]
        nfields = select.sum()
        field['DATE'] = map(datestring, nfields * [date])
        return field

    def schedule_chunk(self,
                       tstart=None,
                       chunk=60,
                       clip=False,
                       plot=False,
                       mode=None):
        """
        Schedule a chunk of exposures.

        Parameters:
        -----------
        tstart : Start time (UTC); if `None`, use `ephem.now()`
        chunk  : Chunk of time to schedule.
        plot   : Dynamically plot each scheduled exposure
        mode   : Mode for scheduler tactician

        Returns:
        --------
        fields : Scheduled fields
        """
        # If no tstop, run for 90 minutes
        if tstart is None: tstart = ephem.now()
        tstop = tstart + chunk * ephem.minute

        return self.run(tstart, tstop, clip, plot, mode)

    def schedule_nite(self,
                      date=None,
                      chunk=60,
                      clip=False,
                      plot=False,
                      mode=None):
        """
        Schedule a night of observing.

        A `nite` is defined by the day (UTC) at noon local time before
        observing started.

        Parameters:
        -----------
        date  : The date of the nite to schedule
        chunk : The duration of a chunk of exposures (minutes)
        plot  : Dynamically plot the progress after each chunk
        mode  : Mode for scheduler tactician

        Returns:
        --------
        chunks : A list of the chunks generated for the scheduled nite.
        """

        # Create the nite
        nite = get_nite(date)

        # Convert chunk to MJD
        if chunk > 1: chunk = chunk * ephem.minute

        try:
            nites = [get_nite(w[0]) for w in self.windows]
            idx = nites.index(nite)
            start, finish = self.windows[idx]
        except (TypeError, ValueError):
            msg = "Requested nite (%s) not found in windows:\n" % nite
            msg += '[' + ', '.join([n for n in nites]) + ']'
            logging.warning(msg)

            start = date
            self.observatory.date = date
            self.observatory.horizon = self.observatory.twilight
            finish = self.observatory.next_rising(ephem.Sun(), use_center=True)
            self.observatory.horizon = '0'

            logging.info("Night start (UTC):  %s" % datestr(start))
            logging.info("Night finish (UTC): %s" % datestr(finish))

        chunks = []
        i = 0
        while start < finish:
            i += 1
            msg = "Scheduling %s -- Chunk %i" % (start, i)
            logging.debug(msg)
            end = start + chunk
            scheduled_fields = self.run(start,
                                        end,
                                        clip=clip,
                                        plot=False,
                                        mode=mode)

            if plot:
                field_select = scheduled_fields[-1:]
                ortho.plotField(field_select, self.target_fields,
                                self.completed_fields)
                if (raw_input(' ...continue ([y]/n)').lower() == 'n'):
                    break

            chunks.append(scheduled_fields)
            start = ephem.Date(chunks[-1]['DATE'][-1]) + constants.FIELDTIME
            #start = end

        if plot: raw_input(' ...finish... ')

        return chunks

    def schedule_survey(self,
                        start=None,
                        end=None,
                        chunk=60,
                        plot=False,
                        mode=None):
        """
        Schedule the entire survey.

        Parameters:
        -----------
        start : Start of survey (int or str)
        end   : End of survey (int or str)
        chunk : The duration of a chunk of exposures (minutes)
        plot  : Dynamically plot the progress after each night
        mode  : Mode of scheduler tactician

        Returns:
        --------
        scheduled_nites : An ordered dictionary of scheduled nites
        """

        self.scheduled_nites = odict()

        for tstart, tend in self.windows:
            if start is not None and ephem.Date(tstart) < ephem.Date(start):
                continue
            if end is not None and ephem.Date(tend) > ephem.Date(end):
                continue

            #nite = nitestring(tstart)
            nite = get_nite(tstart)

            try:
                chunks = self.schedule_nite(tstart,
                                            chunk,
                                            clip=True,
                                            plot=False,
                                            mode=mode)
            except ValueError as error:
                ortho.plotField(self.completed_fields[-1:], self.target_fields,
                                self.completed_fields)
                raise (error)

            self.scheduled_nites[nite] = chunks

            if plot:
                ortho.plotField(
                    self.completed_fields[-1:], self.target_fields,
                    self.completed_fields
                )  #,options_basemap=dict(date='2017/02/21 05:00:00'))

                if (raw_input(' ...continue ([y]/n)').lower() == 'n'):
                    break

        if plot: raw_input(' ...finish... ')
        return self.scheduled_nites

    def write(self, filename):
        self.scheduled_fields.write(filename)

    @classmethod
    def common_parser(cls):
        """
        Common argument parser for scheduler tools.
        """
        from obztak.utils.parser import Parser, DatetimeAction

        description = __doc__
        parser = Parser(description=description)
        #parser.add_argument('--survey',choices=['obztak','maglites','bliss'],
        #                    default = None, help='choose survey to schedule.')
        parser.add_argument('-p',
                            '--plot',
                            action='store_true',
                            help='create visual output.')
        parser.add_argument('--utc',
                            '--utc-start',
                            dest='utc_start',
                            action=DatetimeAction,
                            help="start time for observation.")
        parser.add_argument('--utc-end',
                            action=DatetimeAction,
                            help="end time for observation.")
        parser.add_argument('-k',
                            '--chunk',
                            default=60.,
                            type=float,
                            help='time chunk')
        parser.add_argument('-f',
                            '--fields',
                            default=None,
                            help='all target fields.')
        parser.add_argument('-m',
                            '--mode',
                            default='coverage',
                            help='Mode for scheduler tactician.')
        parser.add_argument('-w',
                            '--windows',
                            default=None,
                            help='observation windows.')
        parser.add_argument('-c',
                            '--complete',
                            nargs='?',
                            action='append',
                            help="fields that have been completed.")
        parser.add_argument('-o',
                            '--outfile',
                            default=None,
                            help='save output file of scheduled fields.')
        parser.add_argument('--write-protect',
                            action='store_true',
                            help='write-protect output files')
        return parser

    @classmethod
    def parser(cls):
        return cls.common_parser()

    @classmethod
    def main(cls):
        args = cls.parser().parse_args()
        scheduler = cls(args.fields, args.windows, args.complete)
        scheduler.run(tstart=args.utc_start,
                      tstop=args.utc_end,
                      plot=args.plot)
        if args.outfile:
            scheduler.scheduled_fields.write(args.outfile)

        return scheduler
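# --- Hedged usage sketch (not part of the original snippet) ------------------
# schedule_nite() above splits a night into fixed-length chunks and advances
# the start time after each chunk (in the real loop the next start comes from
# the last scheduled exposure plus FIELDTIME, not simply from `end`). The bare
# chunking pattern, stripped of the ephem/obztak dependencies, looks roughly
# like this; `chunk_night` is a hypothetical stand-in name.
from datetime import datetime, timedelta

def chunk_night(start, finish, chunk_minutes=60):
    """Split [start, finish) into consecutive chunks of at most chunk_minutes."""
    chunks = []
    chunk = timedelta(minutes=chunk_minutes)
    while start < finish:
        end = min(start + chunk, finish)
        chunks.append((start, end))
        start = end
    return chunks

# Example: a 3.5 h night split into 60-minute chunks (the last chunk is shorter).
night = chunk_night(datetime(2017, 2, 21, 1, 0), datetime(2017, 2, 21, 4, 30))
assert len(night) == 4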
Exemple #34
0
def residuals(pixel,
              nside=NSIDE,
              plot=False,
              y1a1dir='y1a1/v1/hpx/',
              y2q1dir='cat/'):
    y1a1file = os.path.join(y1a1dir, 'cat_hpx_%05d.fits' % pixel)
    y2q1file = os.path.join(y2q1dir, 'cat_hpx_%05d.fits' % pixel)
    print(y1a1file, y2q1file)

    y1a1 = fitsio.read(y1a1file, columns=Y1A1_COLUMNS)
    y2q1 = fitsio.read(y2q1file, columns=Y2Q1_COLUMNS)
    y2q1 = y2q1[(y2q1['WAVG_SPREAD_MODEL_R'] < 0.002)]

    #hpx = ang2pix(nside,y1a1['RA'],y1a1['DEC'])
    #y1a1 = recfuncs.rec_append_fields('HPX',hpx,dtypes=int)
    #
    #hpx = ang2pix(nside,y2q1['RA'],y2q1['DEC'])
    #y2q1 = recfuncs.rec_append_fields('HPX',hpx,dtypes=int)

    #if plot:
    #
    #    fig,axes = plt.subplots(1,2,figsize=(12,5))

    kwargs = dict(histtype='step', lw=1.5, normed=True)
    ret = odict()
    for band in BANDS:
        mag, magerr = bfields(['WAVG_MAG_PSF', 'WAVG_MAGERR_PSF'], band)
        color = COLORS[band]

        y1 = y1a1[(y1a1[mag] < 22) & (y1a1[mag] > 17)]
        y2 = y2q1[(y2q1[mag] < 22) & (y2q1[mag] > 17)]

        match = ugali.utils.projector.match(y1['RA'], y1['DEC'], y2['RA'],
                                            y2['DEC'])
        sepsec = 3600. * match[-1]
        sel = (sepsec < 1.0)
        idx1 = match[0][sel]
        idx2 = match[1][sel]

        y1_match = y1[idx1]
        y2_match = y2[idx2]

        res = (y2[mag][idx2] - y1[mag][idx1])
        res_clip, lo, hi = scipy.stats.sigmaclip(res, 5, 5)

        mu, sigma = norm.fit(res_clip)
        median = np.median(res_clip)
        ret[band] = (median, mu, sigma)

        if plot:
            bins = np.linspace(-0.1, 0.1, 100)
            centers = (bins[1:] + bins[:-1]) / 2.
            kwargs['bins'] = bins
            axes[0].hist(res, color=color, **kwargs)
            mu, sigma = norm.fit(res[(res > bins.min()) & (res < bins.max())])
            label = r'$%s\ (\mu=%.2f,\sigma=%.2f)$' % (band, mu, sigma)
            axes[0].plot(centers,
                         norm.pdf(centers, mu, sigma),
                         color=color,
                         label=label)

    if plot:
        plt.sca(axes[0])
        plt.legend(fontsize=8)
        plt.sca(axes[1])
        plt.legend(fontsize=8)

    return pixel, ret
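# --- Hedged illustration (not from the original source) ----------------------
# The core of residuals() above is: take per-object magnitude differences
# between two matched catalogs, sigma-clip them, and summarize the clipped
# distribution with a Gaussian fit. A stand-alone version of that step, using
# only numpy and scipy; `summarize_residuals` is a hypothetical helper name.
import numpy as np
import scipy.stats
from scipy.stats import norm

def summarize_residuals(mag1, mag2, nsigma=5):
    """Return (median, mu, sigma) of the sigma-clipped magnitude residuals."""
    res = np.asarray(mag2) - np.asarray(mag1)
    res_clip, lo, hi = scipy.stats.sigmaclip(res, nsigma, nsigma)
    mu, sigma = norm.fit(res_clip)
    return np.median(res_clip), mu, sigma

# Example with fake, already-matched magnitudes.
rng = np.random.default_rng(0)
m1 = rng.uniform(17, 22, size=1000)
m2 = m1 + rng.normal(0.0, 0.02, size=1000)
print(summarize_residuals(m1, m2))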
Exemple #35
0
    def write(self, name=None, **kwargs):
        self.setting.update(kwargs)
        if 'geopt' in self.setting and self.setting['geopt']:
            self.content['control']['calculation'] = 'relax'
            self.content['ions'] = odict([
                ('ion_dynamics', 'damp'),
                ('ion_damping', 0.2),
                ('ion_velocities', 'zero'),
            ])
        if 'root_dir' not in kwargs:
            self.setting['root_dir'] = name

        dft_dict = {
            'hse06': 'hse',
            'lda': 'pz',
        }

        def atomIndex(molecule):
            """Add an index to all atoms; not strictly necessary."""
            for i in range(len(molecule.index) - 1):
                start = molecule.index[i]
                end = molecule.index[i + 1]
                if end - start > 1:
                    for I in range(start, end):
                        element = molecule.type_list[I] + str(I)
                        molecule.setAtoms(I, element=element)
            molecule.sort()
            return molecule

        def writeInp(name=None, **setting):

            inp, molecule = \
              PlanewaveInput.write(self, name, **setting)

            molecule.sort()

            type_index = molecule.index
            type_list = molecule.type_list
            pp_files = []

            self.content['system']['nat'] = molecule.N
            self.content['system']['ntyp'] = len(type_index) - 1
            if setting['full_kmesh']:
                self.content['system']['nosym'] = True
                self.content['system']['noinv'] = True

            if 'restart' in setting and setting['restart']:
                self.content['control']['restart_mode'] = 'restart'

            if 'save_density' in setting and setting['save_density']:
                self.content['control']['wf_collect'] = True

            if 'save_wf' in setting and setting['save_wf']:
                self.content['control']['wf_collect'] = True
            if 'print_force' not in setting or not setting['print_force']:
                self.content['control']['tprnfor'] = True

            if not setting['periodic']:
                self.content['system']['assume_isolated'] = 'mt'

            if 'exx' in setting and setting['exx'] == 'anisotropic':
                self.content['system']['exxdiv_treatment'] = 'vcut_ws'
                self.content['system']['ecutvcut'] = 0.7
                self.content['system']['x_gamma_extrapolation'] = False

            if molecule.charge != 0:
                self.content['system']['tot_charge'] = molecule.charge
            if molecule.multiplicity != 1:
                self.content['system']['nspin'] = 2
                diff = molecule.multiplicity - 1
                self.content['system']['tot_magnetization'] = diff

            if 'theory' in setting:
                if self.setting['theory'] in dft_dict:
                    self.content['system']['input_dft'] = \
                      dft_dict[self.setting['theory']]
                else:
                    self.content['system']['input_dft'] = self.setting[
                        'theory']

            if 'scf_step' in setting:
                self.content['electrons']['electron_maxstep'] =\
                  setting['scf_step']

            if 'ks_states' in setting and setting['ks_states']:
                vs = int(round(self.molecule.getValenceElectrons() / 2.0))
                self.content['system']['nbnd'] = setting['ks_states'] + vs
                if 'd_shell' in setting:
                    for a in molecule.type_list:
                        if a in setting['d_shell'] and qtk.n2ve(a) < 10:
                            self.content['system']['nbnd'] += 5

            if 'symmetry' in setting:
                if setting['symmetry'] == 'fcc':
                    self.content['system']['ibrav'] = 2
                    setting['fractional_coordinate'] = True
                    dm = ['A', 'B', 'C', 'cosBC', 'cosAC', 'cosAB']
                    for i in range(6):
                        self.content['system'][dm[i]] = float(
                            self.molecule.celldm[i])

            for section_key in self.content.iterkeys():
                section = '&' + section_key + '\n'
                inp.write(section)
                for key, value in self.content[section_key].iteritems():
                    if type(value) is str:
                        entry = " %s = '%s',\n" % (key, value)
                    elif type(value) is int:
                        entry = ' %s = %d,\n' % (key, value)
                    elif type(value) is float:
                        entry = ' %s = %14.8E,\n' % (key, value)
                    elif type(value) is bool:
                        if value:
                            entry = ' %s = .true.,\n' % key
                        else:
                            entry = ' %s = .false.,\n' % key
                    inp.write(entry)
                inp.write('/\n')

            inp.write("ATOMIC_SPECIES\n")
            for a in range(len(type_index) - 1):
                type_n = type_index[a + 1] - type_index[a]
                PPStr = PPString(self, molecule, type_index[a], type_n, inp)
                stem, ext = os.path.splitext(PPStr)
                if ext != '.UPF':
                    PPStr = PPStr + '.UPF'
                mass = qtk.n2m(type_list[type_index[a]])
                inp.write(' %-3s % 6.3f %s\n' % \
                  (type_list[type_index[a]], mass, PPStr))
                pp_files.append(PPStr)
            inp.write("\n")

            inp.write("ATOMIC_POSITIONS ")
            if self.content['system']['ibrav'] == 0:
                if not setting['fractional_coordinate']:
                    inp.write("angstrom\n")
                    R = molecule.R
                else:
                    inp.write("crystal\n")
                    R = molecule.R_scale
            else:
                inp.write("\n")
                R = molecule.R_scale
            for a in range(len(type_list)):
                inp.write(' %-3s' % type_list[a])
                for i in range(3):
                    inp.write(' % 12.8f' % R[a, i])
                inp.write("\n")
            inp.write("\n")

            if 'kmesh' in setting and setting['kmesh']:
                inp.write("K_POINTS automatic\n")
                for k in setting['kmesh']:
                    inp.write(" %d" % k)
                for s in range(3):
                    inp.write(" 0")
                inp.write('\n\n')

            if 'save_restart' in setting and setting['save_restart']:
                inp.write("ALCHEMY reference\n\n")

            if 'restart' in setting and setting['restart']:
                if 'scf_step' in setting:
                    if setting['scf_step'] != 1:
                        qtk.warning('alchemy with optimization...')
                    inp.write("ALCHEMY prediction\n\n")

            if self.content['system']['ibrav'] == 0:
                inp.write("CELL_PARAMETERS angstrom\n")
                if 'lattice' not in self.setting:
                    self.celldm2lattice()
                lattice_vec = self.setting['lattice']
                for vec in lattice_vec:
                    for component in vec:
                        inp.write(' % 11.6f' % component)
                    inp.write('\n')

            for pp in pp_files:
                pp_file = os.path.join(qtk.setting.espresso_pp, pp)
                if pp not in inp.dependent_files:
                    inp.dependent_files.append(pp_file)

            if 'no_cleanup' in setting and setting['no_cleanup']:
                inp.close(no_cleanup=True)
            else:
                inp.close()

            return inp

        setting = copy.deepcopy(self.setting)
        inp = writeInp(name, **setting)

        return inp
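# --- Hedged sketch (not from the original source) ----------------------------
# The inner loop of writeInp() above serializes nested ordered dicts into
# Fortran-namelist-style '&section ... /' blocks, formatting each value by
# type. A minimal, self-contained version of that formatting logic (the
# function name and the sample content are illustrative only):
import io
from collections import OrderedDict as odict

def write_namelists(content):
    out = io.StringIO()
    for section_key, section in content.items():
        out.write('&' + section_key + '\n')
        for key, value in section.items():
            if isinstance(value, bool):          # check bool before int
                entry = ' %s = .%s.,\n' % (key, 'true' if value else 'false')
            elif isinstance(value, str):
                entry = " %s = '%s',\n" % (key, value)
            elif isinstance(value, int):
                entry = ' %s = %d,\n' % (key, value)
            elif isinstance(value, float):
                entry = ' %s = %14.8E,\n' % (key, value)
            else:
                raise TypeError('unsupported value type: %r' % type(value))
            out.write(entry)
        out.write('/\n')
    return out.getvalue()

content = odict([('control', odict([('calculation', 'relax'),
                                    ('tprnfor', True)])),
                 ('system', odict([('nat', 2), ('ecutwfc', 60.0)]))])
print(write_namelists(content))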
Exemple #36
0
    def __init__(self, aggression):
        # Do not allow to create AIstate instances with an invalid version number.
        if not hasattr(AIstate, 'version'):
            raise ConversionError(
                "AIstate must have an integer version attribute for savegame compatibility"
            )
        if not isinstance(AIstate.version, int):
            raise ConversionError(
                "Version attribute of AIstate must be an integer!")
        if AIstate.version < 0:
            raise ConversionError(
                "AIstate savegame compatibility version must be a positive integer!"
            )

        # need to store the version explicitly as the class variable "version" is only stored in the
        # self.__class__.__dict__ while we only pickle the object (i.e. self.__dict__ )
        self.version = AIstate.version

        # Debug info
        # unique id for game
        self.uid = self.generate_uid(first=True)
        # unique ids for turns.  {turn: uid}
        self.turn_uids = {}

        self._aggression = aggression

        # 'global' (?) variables
        self.colonisablePlanetIDs = odict()
        self.colonisableOutpostIDs = odict()  #
        self.__aiMissionsByFleetID = {}
        self.__shipRoleByDesignID = {}
        self.__fleetRoleByID = {}
        self.diplomatic_logs = {}
        self.__priorityByType = {}

        # initialize home system knowledge
        universe = fo.getUniverse()
        empire = fo.getEmpire()
        self.empireID = empire.empireID
        homeworld = universe.getPlanet(empire.capitalID)
        self.__origin_home_system_id = homeworld.systemID if homeworld else INVALID_ID
        self.visBorderSystemIDs = {self.__origin_home_system_id}
        self.visInteriorSystemIDs = set()
        self.exploredSystemIDs = set()
        self.unexploredSystemIDs = {self.__origin_home_system_id}
        self.fleetStatus = {}  # keys: 'sysID', 'nships', 'rating'
        # systemStatus keys:
        # 'name', 'neighbors' (sysIDs), '2jump_ring' (sysIDs), '3jump_ring', '4jump_ring', 'enemy_ship_count',
        # 'fleetThreat', 'planetThreat', 'monsterThreat' (specifically, immobile nonplanet threat), 'totalThreat',
        # 'localEnemyFleetIDs', 'neighborThreat', 'max_neighbor_threat', 'jump2_threat' (up to 2 jumps away),
        # 'jump3_threat', 'jump4_threat', 'regional_threat', 'myDefenses' (planet rating), 'myfleets',
        # 'myFleetsAccessible' (not just next destination), 'myFleetRating', 'my_neighbor_rating' (up to 1 jump away),
        # 'my_jump2_rating', 'my_jump3_rating', 'my_jump4_rating', 'local_fleet_threats',
        # 'regional_fleet_threats' <== these are only for mobile fleet threats
        self.systemStatus = {}
        self.needsEmergencyExploration = []
        self.newlySplitFleets = {}
        self.militaryRating = 0
        self.shipCount = 4
        self.misc = {}
        self.qualifyingColonyBaseTargets = {}
        self.qualifyingOutpostBaseTargets = {}
        self.qualifyingTroopBaseTargets = {}
        # TODO: track on a per-empire basis
        self.__empire_standard_enemy = CombatRatingsAI.default_ship_stats(
        ).get_stats(hashable=True)
        self.empire_standard_enemy_rating = 0  # TODO: track on a per-empire basis
        self.character = create_character(aggression, self.empireID)
Exemple #37
0
 def __init__(self):
     self.name2table = odict()  # {}
     self.infile = None
Exemple #38
0
  def read(self,fid_list,itype='auto',all_mo=True,nosym=False, sort=True, **kwargs_all):
    '''Reads a list of input files.
    
    **Parameters:**
    
      fid_list : list of str
        List of input file names.
      itype : str, choices={'auto', 'tar', 'molden', 'gamess', 'gaussian.log', 'gaussian.fchk'}
        Specifies the type of the input files.
      sort: bool
        Sort input files by name.
    '''
    # self.geo_info and ao_info have to stay unchanged
    geo_old = []
    ao_old = []
    
    sym_list = {}
    n_ao = {}

    # Check if fid_list points to a tar archive and
    # read all files from the archive if that is the case
    if is_tar_file(fid_list):
      fid_list, itypes = get_all_files_from_tar(fid_list, sort=sort)
    else:
      itypes = [itype]*len(fid_list)

    for i,fname in enumerate(fid_list):

      kwargs = kwargs_all['kwargs'][i] if 'kwargs' in kwargs_all.keys() else kwargs_all
      qc = main_read(fname, itype=itypes[i], all_mo=all_mo, **kwargs)
      # Geo Section
      if i > 0 and (geo_old != qc.geo_info).sum():
        raise IOError('qc.geo_info has changed!')
      else:
        geo_old = deepcopy(qc.geo_info)
      self.geo_spec_all.append(qc.geo_spec)
      # AO Section
      if (i > 0 and not
          numpy.alltrue([numpy.allclose(ao_old[j]['coeffs'],qc.ao_spec[j]['coeffs'])
                        for j in range(len(ao_old))]
                        )):
          raise IOError('qc.ao_spec has changed!')
      else:
          ao_old = deepcopy(qc.ao_spec)

      # MO Section
      sym_tmp = {}    
      self.MO_Spec.append(qc.mo_spec)
      for i,mo in enumerate(qc.mo_spec):
        if nosym:
          qc.mo_spec[i]['sym'] = '%d.1' % (i+1)
        key = mo['sym'].split('.')
        if key[1] not in sym_tmp.keys():
          sym_tmp[key[1]] = 0
          n_ao[key[1]] = len(qc.mo_spec[0]['coeffs'])
        sym_tmp[key[1]] += 1

      for k,it in sym_tmp.items():
        if k in sym_list:
          sym_list[k] = max(sym_list[k],it)
        else:
          sym_list[k] = it
    
    self.geo_spec_all = numpy.array(self.geo_spec_all)
    self.geo_info = qc.geo_info
    self.ao_spec = qc.ao_spec

    # Presort the MOs according to their symmetry
    n_r = len(fid_list)
    self.sym = []
    for k in sorted(sym_list.keys()):
      it = sym_list[k]
      self.sym.append((k,len(self.sym)))
      self.mo_coeff_all.append(numpy.zeros((n_r,it,n_ao[k])))
      self.mo_energy_all.append(numpy.zeros((n_r,it)))
      self.mo_occ_all.append(numpy.zeros((n_r,it)))

    self.sym = odict(self.sym)

    for i,spec in enumerate(self.MO_Spec):
      for j,mo in enumerate(spec):
        index,k = mo['sym'].split('.')
        
        index = int(index)-1
        
        self.mo_coeff_all[self.sym[k]][i,index,:] = mo['coeffs']
        self.mo_energy_all[self.sym[k]][i,index] = mo['energy']
        self.mo_occ_all[self.sym[k]][i,index] = mo['occ_num']
    return
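# --- Hedged sketch (not from the original source) ----------------------------
# The presorting above keys everything on MO labels of the form 'index.sym':
# first count how many orbitals belong to each symmetry, then allocate one
# array per symmetry and fill it by the parsed index. Stripped to the core
# (the labels and fill values here are illustrative):
import numpy
from collections import OrderedDict as odict

labels = ['1.a1', '2.a1', '1.b2', '3.a1', '2.b2']
counts = odict()
for lab in labels:
    sym = lab.split('.')[1]
    counts[sym] = counts.get(sym, 0) + 1

arrays = odict((sym, numpy.zeros(n)) for sym, n in counts.items())
for lab in labels:
    index, sym = lab.split('.')
    arrays[sym][int(index) - 1] = 1.0   # stand-in for mo['energy'] etc.
print({k: v.tolist() for k, v in arrays.items()})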
Exemple #39
0
def ParseCommandLine(d):
    d["-C"] = " "  # Separation string for glob patterns
    d["-D"] = False  # Print documentation files
    d["-L"] = False  # Follow directory soft links
    d["-P"] = False  # Print picture files
    d["-S"] = False  # Print source code files
    d["-c"] = False  # Color code the output
    d["-d"] = False  # Show directories only
    d["-f"] = False  # Show files only
    d["-h"] = False  # Show hidden files/directories
    d["-i"] = False  # Case-sensitive search
    d["-e"] = []  # Only list files with these glob patterns
    d["-l"] = -1  # Limit to this number of levels (-1 is no limit)
    d["-p"] = False  # Show python files
    d["-r"] = False  # Don't recurse into directories
    d["-s"] = False  # Sort the output directories and files
    d["-x"] = []  # Ignore files with these glob patterns
    d["-V"] = []  # Revision control directories to include
    if len(sys.argv) < 2:
        Usage(d)
    try:
        optlist, args = getopt.getopt(sys.argv[1:],
                                      "C:DLPScde:fhil:prsVx:",
                                      longopts="S git hg rcs".split())
    except getopt.GetoptError as e:
        msg = e.msg
        print(msg)
        exit(1)
    for opt in optlist:
        if opt[0] == "-C":
            d["-C"] = opt[1]
        elif opt[0] == "-D":
            d["-D"] = True
            d["-e"] += documentation_files
        elif opt[0] == "-h":
            d["-h"] = True
        elif opt[0] == "-i":
            d["-i"] = True
        elif opt[0] == "-L":
            d["-L"] = not d["-L"]
        elif opt[0] == "-P":
            d["-P"] = True
            d["-e"] += picture_files
        elif opt[0] == "-S":
            d["-S"] = True
            d["-e"] += source_code_files
        elif opt[0] == "-c":
            d["-c"] = not d["-c"]
        elif opt[0] == "-d":
            d["-d"] = not d["-d"]
        elif opt[0] == "-f":
            d["-f"] = not d["-f"]
        elif opt[0] == "-e":
            d["-e"] += opt[1].split(d["-C"])
        elif opt[0] == "-l":
            n = int(opt[1])
            if n < 0:
                raise ValueError("-l option must include number >= 0")
            d["-l"] = n
        elif opt[0] == "-p":
            d["-p"] = not d["-p"]
            d["-e"] += ["*.py"]
        elif opt[0] == "-r":
            d["-r"] = not d["-r"]
        elif opt[0] == "-s":
            d["-s"] = not d["-s"]
        elif opt[0] == "-V":
            d["-h"] = True
            d["-V"] = version_control
        elif opt[0] == "-x":
            s, c = opt[1], d["-C"]
            d["-x"] += opt[1].split(d["-C"])
        # Long options
        elif opt[0] == "--S":
            d["-S"] = True
            d["-e"] += source_code_files_long
        elif opt[0] == "--hg":
            d["-h"] = True
            d["-V"] += ["hg"]
        elif opt[0] == "--git":
            d["-h"] = True
            d["-V"] += ["git"]
        elif opt[0] == "--rcs":
            d["-h"] = True
            d["-V"] += ["RCS"]
    if len(args) < 1:
        Usage(d)
    if d["-i"]:
        d["regex"] = re.compile(args[0])
    else:
        d["regex"] = re.compile(args[0], re.I)
    args = args[1:]
    if len(args) == 0:
        args = ["."]
    # Store search information in order it was found
    d["search"] = odict()
    # Normalize -V option
    d["-V"] = list(sorted(list(set(d["-V"]))))
    return args
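# --- Hedged sketch (not from the original source) ----------------------------
# ParseCommandLine() above follows the classic getopt pattern: seed a dict of
# option defaults, then walk the (option, value) pairs and update it. A
# minimal, self-contained version; the option letters here are illustrative.
import getopt
import sys

def parse(argv):
    d = {"-c": False, "-l": -1, "-e": []}       # defaults
    try:
        optlist, args = getopt.getopt(argv, "cl:e:")
    except getopt.GetoptError as e:
        print(e.msg)
        sys.exit(1)
    for opt, val in optlist:
        if opt == "-c":
            d["-c"] = not d["-c"]               # boolean toggle
        elif opt == "-l":
            d["-l"] = int(val)                  # value-carrying option
        elif opt == "-e":
            d["-e"] += val.split(",")           # accumulating option
    return d, args

print(parse(["-c", "-l", "3", "-e", "*.py,*.txt", "pattern", "dir"]))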
Exemple #40
0
    def __init__(
            self,  # nosec
            tokenizer: Optional[Tokenizer] = None,
            lower: bool = False,
            pad_token: Optional[str] = '<pad>',
            unk_token: str = '<unk>',
            sos_token: Optional[str] = None,
            eos_token: Optional[str] = None,
            embeddings_info: Optional[EmbeddingsInformation] = None,
            embeddings: Optional[str] = None,
            embeddings_format: str = 'glove',
            embeddings_binary: bool = False,
            unk_init_all: bool = False,
            drop_unknown: bool = False,
            max_seq_len: Optional[int] = None,
            truncate_end: bool = False,
            setup_all_embeddings: bool = False) -> None:
        """Initialize the TextField.

        Parameters
        ----------
        tokenizer : Tokenizer, optional
            Tokenizer to use, by default WordTokenizer()
        lower : bool, optional
            If given, lowercase the input, by default False
        pad_token : str, optional
            Reserved padding token. Note that this object does not
            perform padding. Padding is done on the fly, when sampling.
            (defaults to '<pad>')
        unk_token : str, optional
            The token to use for out of vocabulary tokens
            (defaults to '<unk>')
        sos_token : str, optional
            Start of sentence token to add to the start of
            each sequence (defaults to None)
        eos_token : str, optional
            End of sentence token to add to the end of each
            sequence (defaults to None)
        embeddings_info : EmbeddingsInformation, optional
            The embeddings information. By default None
        embeddings : str
            WILL BE DEPRECATED SOON. USE 'from_embeddings'
            FACTORY INSTEAD.
            Path to pretrained embeddings or the embedding name
            in case format is gensim.
        embeddings_format : str, optional
            WILL BE DEPRECATED SOON. USE 'from_embeddings'
            FACTORY INSTEAD.
            The format of the input embeddings, should be one of:
            'glove', 'word2vec', 'fasttext' or 'gensim'. The latter can
            be used to download embeddings hosted on gensim on the fly.
            See https://github.com/RaRe-Technologies/gensim-data
            for the list of available embedding aliases.
        embeddings_binary : bool, optional
            WILL BE DEPRECATED SOON. USE 'from_embeddings'
            FACTORY INSTEAD.
            Whether the input embeddings are provided in binary format,
            by default False
        unk_init_all : bool, optional
            If True, every token not provided in the input embeddings is
            given a random embedding from a normal distribution.
            Otherwise, all of them map to the '<unk>' token.
        drop_unknown: bool
            WILL BE DEPRECATED SOON. USE 'from_embeddings'
            FACTORY INSTEAD.
            Whether to drop tokens that don't have embeddings
            associated. Defaults to False.
            Important: this flag will only work when using embeddings.
        max_seq_len: int, optional
            The maximum length possibly output by the process func.
            If the input has more tokens than this number, the output
            will be truncated as a post-processing step.
        truncate_end: bool
            Determines the window of observed text in process if the
            input is larger than max_seq_len. If this value is True
            the window starts from the end of the utterance.
            Defaults to False.

            example: max_seq_len=3, input_text=1 2 3 4 5
            truncate_end=false: output=1 2 3
            truncate_end=true: output=3 4 5
        setup_all_embeddings: bool
            WILL BE DEPRECATED SOON. USE 'from_embeddings'
            FACTORY INSTEAD.
            Controls if all words from the optional provided
            embeddings will be added to the vocabulary and to the
            embedding matrix. Defaults to False.

        """
        if embeddings:
            if embeddings_info:
                raise ValueError(
                    "Cannot submit embeddings information and use the "
                    "embeddings parameters simultaneously. "
                    "Use the 'from_embeddings' factory instead.")

            warnings.warn(
                "The embeddings-exclusive parameters " +
                "('embeddings', 'embeddings_format', 'embeddings_binary', " +
                "'setup_all_embeddings', 'drop_unknown', 'unk_init_all') " +
                "will be deprecated in a future release. " +
                "Please migrate to use the 'from_embeddings' factory.")

            embeddings_info = EmbeddingsInformation(
                embeddings=embeddings,
                embeddings_format=embeddings_format,
                embeddings_binary=embeddings_binary,
                build_vocab_from_embeddings=setup_all_embeddings,
                unk_init_all=unk_init_all,
                drop_unknown=drop_unknown)

        self.tokenizer = tokenizer or WordTokenizer()
        self.lower = lower

        self.pad = pad_token
        self.unk = unk_token
        self.sos = sos_token
        self.eos = eos_token

        self.embeddings_info = embeddings_info

        self.embedding_matrix: Optional[torch.Tensor] = None

        self.max_seq_len = max_seq_len
        self.truncate_end = truncate_end

        self.unk_numericals: Set[int] = set()

        self.vocab: Dict = odict()
        specials = [pad_token, unk_token, sos_token, eos_token]
        self.specials = [
            special for special in specials if special is not None
        ]

        self.register_attrs('vocab')
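# --- Hedged sketch (not from the original source) ----------------------------
# The max_seq_len / truncate_end behaviour documented above amounts to picking
# a window of at most max_seq_len tokens from either the start or the end of
# the token sequence. A hypothetical helper that reproduces the docstring's
# example:
def truncate(tokens, max_seq_len, truncate_end=False):
    if max_seq_len is None or len(tokens) <= max_seq_len:
        return tokens
    return tokens[-max_seq_len:] if truncate_end else tokens[:max_seq_len]

assert truncate([1, 2, 3, 4, 5], 3) == [1, 2, 3]
assert truncate([1, 2, 3, 4, 5], 3, truncate_end=True) == [3, 4, 5]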
Exemple #41
0
 def __init__(self, nz, ngf, nc=3):
     super(GAN_G, self).__init__()
     self.main = nn.Sequential()
     self.layers = odict([
         #block 0
         ('convT_0',
          nn.ConvTranspose2d(nz,
                             ngf * 64,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_0', nn.BatchNorm2d(ngf * 64)),
         ('act_0', nn.LeakyReLU(inplace=True)),
         #block 1
         ('convT_1',
          nn.ConvTranspose2d(ngf * 64,
                             ngf * 64,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_1', nn.BatchNorm2d(ngf * 64)),
         ('act_1', nn.LeakyReLU(inplace=True)),
         #block 2
         ('convT_2',
          nn.ConvTranspose2d(ngf * 64,
                             ngf * 64,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_2', nn.BatchNorm2d(ngf * 64)),
         ('act_2', nn.LeakyReLU(inplace=True)),
         #block 3
         ('convT_3',
          nn.ConvTranspose2d(ngf * 64,
                             ngf * 32,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_3', nn.BatchNorm2d(ngf * 32)),
         ('act_3', nn.LeakyReLU(inplace=True)),
         #block 4
         ('convT_4',
          nn.ConvTranspose2d(ngf * 32,
                             ngf * 16,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_4', nn.BatchNorm2d(ngf * 16)),
         ('act_4', nn.LeakyReLU(inplace=True)),
         #block 5
         ('convT_5',
          nn.ConvTranspose2d(ngf * 16,
                             ngf * 8,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_5', nn.BatchNorm2d(ngf * 8)),
         ('act_5', nn.LeakyReLU(inplace=True)),
         #block 6
         ('convT_6',
          nn.ConvTranspose2d(ngf * 8,
                             ngf * 8,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_6', nn.BatchNorm2d(ngf * 8)),
         ('act_6', nn.LeakyReLU(inplace=True)),
         #block 7
         ('convT_7',
          nn.ConvTranspose2d(ngf * 8,
                             ngf * 8,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_7', nn.BatchNorm2d(ngf * 8)),
         ('act_7', nn.LeakyReLU(inplace=True)),
         #block 8
         ('convT_8',
          nn.ConvTranspose2d(ngf * 8,
                             ngf * 8,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('bn_8', nn.BatchNorm2d(ngf * 8)),
         ('act_8', nn.LeakyReLU(inplace=True)),
         #block 9
         ('convT_9',
          nn.ConvTranspose2d(ngf * 8,
                             nc,
                             4,
                             stride=2,
                             padding=1,
                             bias=False)),
         ('act_9', nn.LeakyReLU(inplace=True)),
     ])
     self.tanh = nn.Tanh()
     self.sigmoid = nn.Sigmoid()
Exemple #42
0
def recreate_OrderedDict(name, values):
    return odict(values['items'])
Exemple #43
0
 def construct_mapping(loader, node):
     loader.flatten_mapping(node)
     return odict(loader.construct_pairs(node))
__author__ = "Alex Drlica-Wagner"
__email__ = "*****@*****.**"
__version__ = "UNKNOWN"

import os, sys
import csv
from collections import OrderedDict as odict
import copy
import re
import logging

import numpy as np

#MUNICH HACK (shouldn't be necessary any more)
HACK = odict([
    #('Ludwig-Maximilians-Universit',r'Department of Physics, Ludwig-Maximilians-Universit\"at, Scheinerstr.\ 1, 81679 M\"unchen, Germany')
])


def check_umlaut(lines):
    """Check for unescaped umlaut characters in quoted strings."""
    #This is a problem:
    #  ...,"Universit\"ats-Sternwarte, Fakult\"at f\"ur Physik"
    #While this is not:
    #  Gruen,Daniel,D.~Gr\"un,...

    # The unescaped umlaut pattern: \" and not \""
    umlaut = re.compile(r'\\"(?!")')
    # The start of a quoted string: ,"
    quote = re.compile(r',"')
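# --- Hedged demonstration (not from the original source) ---------------------
# check_umlaut() above pairs two patterns: `umlaut` finds a LaTeX \" that has
# not yet been doubled to \"", and `quote` finds the start of a quoted CSV
# field, where such an undoubled quote breaks the CSV. A quick check of the
# umlaut pattern on its own:
import re

umlaut = re.compile(r'\\"(?!")')
assert umlaut.search(r'Fakult\"at f\"ur Physik') is not None  # needs escaping
assert umlaut.search(r'Fakult\""at f\""ur Physik') is None    # already doubled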
Exemple #45
0
 def __init__(self, root, file_pattern='*.txt', black_list=None):
     self.root = root
     self.file_pattern = file_pattern
     self.black_list = tuple(black_list) if black_list else tuple()
     self.rules = odict()
Exemple #46
0
 def __init__(self):
     self.filename = None
     self.meta = odict()
     self.data = odict()
     self.pack = []  # A list of dicts: {"meta":{}, "data":{}}
Exemple #47
0
def main(argv):
    args = parse_argv(argv[1:])

    # set e-mail for identification to NCBI
    Entrez.email = args.email

    # repo directory
    makedirs(args.repo.parent, exist_ok=True)
    # repo database
    path_db = args.repo.with_suffix(".db")
    # repo log
    path_log = args.repo.with_suffix(".log")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
        handlers=(logging.FileHandler(path_log), logging.StreamHandler()),
    )
    logging.info(argv)

    # metadata
    accs, fdat, mdat = set(), {}, ""
    db, rettype, baseterm = args.db, args.rettype, args.term
    if path_db.exists():
        with sqlite3.connect(path_db) as conn:
            # 0 -> key
            accs = {
                row[0]
                for row in conn.execute("SELECT key FROM offset_data")
            }
            # file_number -> name
            fdat = odict(conn.execute("SELECT * FROM file_data"))
            # key -> value
            meta = odict(conn.execute("SELECT * FROM meta_data"))
            # override args if the index database has metadata
            # mdat is the previous query execution start time
            db = meta.get("db", db)
            mdat = meta.get("mdat", mdat)
            rettype = meta.get("format", rettype)
            baseterm = meta.get("term", baseterm)

    # remote - local accessions
    term = baseterm + (f" AND {mdat}:{MAX_MDAT}[MDAT]" if mdat else "")
    logging.info(term)
    now = datetime.now().strftime("%Y/%m/%d")
    remote_accs = set(chain.from_iterable(esearch_accs(db, term, args.retmax)))
    accs = list(remote_accs - accs)
    logging.info(f"count = {len(accs)}")

    paths = []
    width = len(str(len(accs)))
    for i, j in enumerate(range(0, len(accs), args.retmax), start=1):
        # fetch
        k = min(len(accs), j + args.retmax)
        csv = ",".join(accs[j:j + args.retmax])
        with Entrez.efetch(db, id=csv, rettype=rettype,
                           retmode="text") as handle:
            path = args.repo.parent / f"{args.repo.name}-{i}.{rettype}.bgz.tmp"
            # compress
            with BgzfWriter(path) as stream:
                print(handle.read(), file=stream)
        paths.append(path)
        logging.info(
            f"{j:0{width}} - {k:0{width}} {k / len(accs) * 100:06.2f}%")

    # truthy indicates new accessions
    if paths:
        # combine previous files with new ones
        paths = [args.repo.parent / ele for ele in fdat.values()] + paths
        # rename with zero-fill
        width = len(str(len(paths)))
        paths = {
            ele:
            ele.with_name(f"{args.repo.name}-{idx:0{width}}.{rettype}.bgz")
            for idx, ele in enumerate(paths, start=1)
        }
        for key, val in paths.items():
            if key != val:
                logging.info(f"{key} -> {val}")
                key.rename(val)
        try:
            path_tmp = path_db.with_suffix(".tmp")
            path_tmp.exists() and path_tmp.unlink()
            print("index...")
            SeqIO.index_db(str(path_tmp), list(map(str, paths.values())),
                           rettype)
            # update metadata
            with sqlite3.connect(path_tmp) as conn:
                conn.execute(
                    "INSERT INTO meta_data VALUES ('db', ?), ('term', ?), ('mdat', ?)",
                    (db, baseterm, now),
                )
            path_tmp.rename(path_db)
        except Exception as e:
            logging.error(e)
            # revert original path names
            for key, val in paths.items():
                logging.info(f"{val} -> {key}")
                val.exists() and val.rename(key)

    return 0
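# --- Hedged sketch (not from the original source) ----------------------------
# The fetch loop in main() above walks the accession list in batches of
# `retmax`, joining each batch into a comma-separated id string for efetch.
# The batching on its own, stripped of the Entrez/BGZF details (`batches` is a
# hypothetical helper name):
def batches(items, size):
    for j in range(0, len(items), size):
        yield j, items[j:j + size]

accs = ["ACC%04d" % i for i in range(7)]
for j, batch in batches(accs, 3):
    print(j, ",".join(batch))
# 0 ACC0000,ACC0001,ACC0002
# 3 ACC0003,ACC0004,ACC0005
# 6 ACC0006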
Exemple #48
0
                #   aggregation performance (both --distributed and
                #   --cache=persist were provided)
                res = DASK_CLIENT.persist(res)
                distributed.wait(res)
            else:
                if DEBUG:
                    print("DEBUG: Force-loading Dask dataframe", flush=True)
                res = res.persist()

    end = time.time()

    return end - start, res


read = odict([
    (f, odict()) for f in
    ["parq", "snappy.parq", "gz.parq", "bcolz", "feather", "h5", "csv"]
])


def read_csv_dask(filepath, usecols=None):
    # Pandas writes CSV files out as a single file
    if os.path.isfile(filepath):
        return dd.read_csv(filepath, usecols=usecols)
    # Dask may have written out CSV files in partitions
    filepath_expr = filepath.replace('.csv', '*.csv')
    return dd.read_csv(filepath_expr, usecols=usecols)


read["csv"]["dask"] = lambda filepath, p, filetype: benchmark(
    read_csv_dask, (filepath, Kwargs(usecols=p.columns)), filetype)
read["h5"]["dask"] = lambda filepath, p, filetype: benchmark(
Exemple #49
0
    else:
        pixels = sorted([
            p for p in range(npix)
            if len(glob.glob(y1a1dir + '/*%05d.fits' % p))
            and len(glob.glob(y2q1dir + '/*%05d.fits' % p))
        ])

    if len(pixels) == 0:
        msg = "Invalid pixel: %s" % opts.pix
        raise Exception(msg)

    args = [pix for pix in pixels]
    p = Pool(maxtasksperchild=1)
    out = p.map(residuals, args)

median_skymaps = odict()
mean_skymaps = odict()
std_skymaps = odict()
for band in BANDS:
    median_skymap = blank(nside)
    mean_skymap = blank(nside)
    std_skymap = blank(nside)
    for pix, val in out:
        median_skymap[pix] = val[band][0]
        mean_skymap[pix] = val[band][1]
        std_skymap[pix] = val[band][2]
    median_skymaps[band] = median_skymap
    mean_skymaps[band] = mean_skymap
    std_skymaps[band] = std_skymap

for band in BANDS:
Exemple #50
0
    2: [3],
    3: [2],
    4: [999],
    5: [4],
    6: [5],
    7: [6, 16, 17, 18, 19],
    8: [7, 8, 9],
    9: [10],
    10: [11, 12],
    11: [13],
}

#lines= open(ifile,'r').readlines()

oldcc = -1
newrow = odict()
with open(ifile, 'r') as f:
    start = True
    for row in f:
        if start:
            print(row, end='')
            out.write(row)
            if row.startswith('#DATA'): start = False
            continue
        fields = row.split(',')
        cc = int(fields[0])
        if cc != oldcc:
            newrow[cc] = dict()
            oldcc = cc
        snap = int(fields[1])
        if snap == 4: continue
Version__Length = pack('!B', JoinHalvs(HalfBit(4, 'IP Version')+HalfBit(5, 'Header Length')))
TypeOfService = pack('!B', 0) # 8 bit (1 byte)
TotalLength = pack('!H', 52) # 16 bit (2 byte)
Identification = pack('!H', randint(30000, 60000))
## Flags is only 3 bits and the fragment offset is 13 bits, so the two have to be joined creatively.
## Since this particular packet is not fragmented (offset 0), the offset folds into the byte holding
## "Flags", and we can simply pad with a blank 8-bit byte at the end :) Ugly hack; it will be reworked
## once this is rebuilt to operate on binary fields directly.
Flags__FragmentOffset = pack('!B', JoinHalvs(HalfBit(4, 'Flags: 0100')+HalfBit(0, 'Fragment offset'))) + pack('!B', 0)
TTL = pack('!B', 128)
Protocol = pack('!B', 6) # TCP
Checksum_Good__Bad = pack('!B', 0) + pack('!B', 0) # Validation disabled
source = socket.inet_aton ( source_ip )
destination = socket.inet_aton ( dest_ip )

_map = odict()
_map['version'] = 1
_map['Type of service'] = 1
_map['Total Length'] = 2
_map['Identification'] = 2
_map['Flags'] = 1
_map['Fragment Offset'] = 1
_map['TTL'] = 1
_map['Protocol'] = 1
_map['Checksum'] = 2
_map['source'] = 4
_map['destination'] = 4
IP_package = Version__Length+TypeOfService+TotalLength+Identification+Flags__FragmentOffset+TTL+Protocol+Checksum_Good__Bad+source+destination

print('IP Header:')
humanHex(hexdump(IP_package), _map)
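# --- Hedged sketch (not from the original source) ----------------------------
# The header above is a concatenation of struct.pack() fields, and _map records
# how many bytes each field occupies, presumably so the hex dump can be
# annotated field by field. A minimal, self-contained version of that
# "slice a packed buffer by a width map" idea (field names, widths, and the
# (4 << 4) | 5 nibble packing are illustrative stand-ins):
from collections import OrderedDict as odict
from struct import pack

header = pack('!B', (4 << 4) | 5) + pack('!B', 0) + pack('!H', 52)
widths = odict([('version/IHL', 1), ('TOS', 1), ('total length', 2)])

offset = 0
for name, nbytes in widths.items():
    chunk = header[offset:offset + nbytes]
    print('%-14s %s' % (name, chunk.hex()))
    offset += nbytes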
Exemple #52
0
class nesi_gen(object):
    def __init__(self, gen_obj_list):
        self.gen_obj_list = gen_obj_list
        # note: gen dir should contain only generated files, so that stale
        # generated files can be detected/removed. that is, any file
        # present but not generated inside gen_dir will be removed.
        self.gen_dir = 'gen'
        self.gen_fns = set()
        for fn in gen_obj_list.gen_fns:
            self.gen_fns.add(fn)  # track files generated by gen_obj_list

        # codegen data
        self.cinfos = odict()
        self.tinfos = odict()

        try:
            os.mkdir(self.gen_dir)
        except OSError, e:
            pass
        check_dir_writable(self.gen_dir)

        # scan all files and cache NESI commands
        for obj_list_fn in self.gen_obj_list.obj_list_fns:
            obj_dir = os.path.dirname(obj_list_fn)
            self.src_dir = join(obj_dir, '..', 'src')
            for root, dirs, fns in os.walk(self.src_dir):
                for fn in fns:
                    self.proc_fn(root, fn)
                dirs[:] = []

        # convert cinfo.bases from names to objects and fill in bases and derived types lists
        for cinfo in self.cinfos.itervalues():
            base_names = cinfo.bases
            cinfo.bases = []
            for btn in base_names:
                bt = self.cinfos.get(btn, None)
                if bt is None:
                    raise RuntimeError(
                        "NESI type %r listed %r as its base_type, but that is not a NESI type"
                        % (cinfo.cname, btn))
                cinfo.bases.append(bt)
                bt.derived.append(cinfo)

        # sort derived by cname
        for cinfo in self.cinfos.itervalues():
            cinfo.derived.sort(key=attrgetter('cname'))

        # populate tinfos and cinfos for NESI structs
        #for cinfo in self.cinfos.itervalues():
        #    self.tinfos.setdefault( cinfo.cname, tinfo_t( cinfo.src_fn, len(self.tinfos), cinfo.cname ) )

        # populate tinfos for any remaining types (vector, pointer, and base types)
        for cinfo in self.cinfos.itervalues():
            tnames = [(var, var.tname) for var in cinfo.vars_list]
            # include an entry for the class itself, so that we'll
            # generate the p_ and vect_p_ wrappers for all classes
            # (via the wts.extend() below) even if they are not used
            # anywhere as NESI vars themselves:
            tnames.append((None, cinfo.cname))
            for var, tname in tnames:
                wts = list(iter_wrapped_types(tname))
                assert len(wts)
                lt = wts[0]  # leaf / least-derived type
                src_fn = None
                if lt in self.cinfos:
                    src_fn = self.cinfos[lt].src_fn
                    wts[0:1] = []
                    # optional: always add p_ and vect_p_ tinfos for nesi types
                    wts.extend(["p_" + lt, "vect_p_" + lt])
                else:
                    src_fn = "nesi.cc"
                for wt in wts:
                    if wt in self.tinfos:
                        continue
                    ti = tinfo_t(wt, src_fn)
                    self.tinfos[wt] = ti

                # check no_init_okay restrictions
                no_init_okay = None
                if tname in self.cinfos:
                    no_init_okay = 1
                elif tname in self.tinfos:
                    no_init_okay = self.tinfos[tname].no_init_okay
                assert no_init_okay is not None  # type must be a struct or another known type

                if not no_init_okay:  # i.e. not a pointer type
                    if (var.req) and (var.default is not None):
                        raise RuntimeError(
                            "field %s of struct %s is marked as required, but has a default value. this is confusing. either make the field not required or remove its default value."
                            % (var.vname, cinfo.cname))
                    if (not var.req) and (var.default is None):
                        raise RuntimeError(
                            "field %s of struct %s is optional (not required and has no default), but is not a pointer, vector, or struct type. only pointer, vector, or struct types may be optional; specify a default, make the field required, or change the type. for example, you could prefix the type name with p_ to make it a pointer (and in that case be sure to check it is not-NULL before use)."
                            % (var.vname, cinfo.cname))

        # create per-file generated code files
        per_file_gen = odict()
        nesi_decls_inc = '#include "../../src/nesi_decls.H"\n'
        for cinfo in self.cinfos.itervalues():
            gf = per_file_gen.setdefault(cinfo.src_fn, [nesi_decls_inc])
            gf.append(cinfo.gen_get_field())
            gf.append(cinfo.gen_vinfos_predecls())
            gf.append(cinfo.gen_vinfos())
            gf.append(cinfo.gen_cinfo())

        for tinfo in self.tinfos.itervalues():
            gf = per_file_gen.setdefault(tinfo.src_fn, [nesi_decls_inc])
            gf.append(tinfo.get_tinfo())

        for gfn, gfn_texts in per_file_gen.iteritems():
            self.update_file_if_different(gfn + '.nesi_gen.cc',
                                          "".join(gfn_texts))

        enabled_features = [
            dep.name for dep in self.gen_obj_list.deps_list if dep.enable
        ]
        self.update_file_if_different('build_info.cc',
                                      get_build_info_c_str(enabled_features))

        self.remove_stale_files()
        print "wrappers up to date."
Exemple #53
0
#!/usr/bin/env python
import numpy as np
import sncosmo
from collections import OrderedDict as odict
from astropy.table import Table

model = sncosmo.Model(source='salt2')
model.set(z=0.5, c=0.2, t0=55100., x1=0.5)
model.set_source_peakabsmag(-19.5, 'bessellb', 'ab')

times = np.linspace(55070., 55150., 40)
bands = np.array(10 * ['sdssg', 'sdssr', 'sdssi', 'sdssz'])
zp = 25. * np.ones(40)
zpsys = np.array(40 * ['ab'])

flux = model.bandflux(bands, times, zp=zp, zpsys=zpsys)
fluxerr = (0.05 * np.max(flux)) * np.ones(40, dtype=float)
flux += fluxerr * np.random.randn(40)

data = Table(odict([('time', times), ('band', bands), ('flux', flux),
                    ('fluxerr', fluxerr), ('zp', zp), ('zpsys', zpsys)]),
             meta=dict(zip(model.param_names, model.parameters)))

sncosmo.write_lc(data, 'example_photometric_data.dat')
Exemple #54
0
        self.mcmd = mcmd
        self.geof = geof
        self.key = key
        self.cmdline = cmdline
        self.name = "bench%d" % i

        lab = (df, b.metric, "rfast", "rslow", b.other)
        self.labels = labfmt_(lab)

        a, d, r = self.make_a()

        self.a = a
        self.d = d
        self.r = r


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    ratios = odict()
    ratios["R0/1_TITAN_V"] = "R0_TITAN_V R1_TITAN_V".split()
    ratios["R0/1_TITAN_RTX"] = "R0_TITAN_RTX R1_TITAN_RTX".split()
    ratios["R1/0_TITAN_V"] = "R1_TITAN_V R0_TITAN_V".split()
    ratios["R1/0_TITAN_RTX"] = "R1_TITAN_RTX R0_TITAN_RTX".split()

    args = Bench.Args()
    args.ratios = ratios

    b = Bench(args)
    print(b)
Exemple #55
0
import sys
import argcomplete

from collections import OrderedDict as odict

from c4.cmany.project import Project as Project
from c4.cmany import args as c4args
from c4.cmany import help as c4help

cmds = odict([
    ('help', ['h']),
    ('configure', ['c']),
    ('build', ['b']),
    ('install', ['i']),
    ('run', ['r']),
    ('show_vars', ['sv']),
    ('show_builds', ['sb']),
    ('show_build_names', ['sn']),
    ('show_build_dirs', ['sd']),
    ('show_targets', ['st']),
    ('create_proj', ['cp']),
    ('export_vs', []),
])


def cmany_main(in_args=None):
    if in_args is None:
        in_args = sys.argv[1:]
    mymod = sys.modules[__name__]
    parser = c4args.setup(cmds, mymod)
    argcomplete.autocomplete(parser)
    args = c4args.parse(parser, in_args)
Exemple #56
0
    def odict(self):

        error_odict = odict()
        error_odict['message'] = self.message

        return error_odict
Exemple #57
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 22 17:36:46 2019

Settings for launch_jobs.py
Example settings

@author: Matthias Göbel

"""
from collections import OrderedDict as odict
from run_wrf.configs.config import *
from copy import deepcopy
params = deepcopy(params)
param_combs = None


# %%

runID = "sfclay"  # name for this simulation series

param_grid = odict(sf_sfclay_physics=[1, 2, 5])

params["dx"] = 500  # horizontal grid spacing x-direction(m)
params["dy"] = None  # horizontal grid spacing y-direction (m), if None: dy = dx
Exemple #58
0
    def __init__(
        self,
        in_nc=128,
        out_proj_nc=16,
        conv_ncs=[128, 128, 64, 64, 32],
        conv_ss=[1, 1, 1, 1, 1, 1],
        conv_gns=[16, 16, 16, 8, 8],
        conv_kss=[1, 3, 3, 3, 3],
        conv_pds=[0, 1, 1, 1, 1],
        sub_conv_factors=[2, 2, 2, 2, 2],
        sub_conv_ncs=[128, 64, 64, 32, 16],
        sub_conv_ss=[1, 1, 1, 1, 1],
        sub_conv_gns=[16, 16, 8, 8, 4],
        out_proj_dim=16,
        out_proj_ks=3,
        out_proj_stride=1,
        act="celu",
    ):
        super().__init__()

        act = nn.LeakyReLU(0.2)
        conv_layers = odict([])
        prev_num_channels = in_nc
        for i, (nc, ks, s, gn, pd, sc_f, sc_nc, sc_s, sc_gn) in enumerate(
                zip(
                    conv_ncs,
                    conv_kss,
                    conv_ss,
                    conv_gns,
                    conv_pds,
                    sub_conv_factors,
                    sub_conv_ncs,
                    sub_conv_ss,
                    sub_conv_gns,
                )):
            conv_layers[f"group_conv{i}"] = nn.Sequential(
                odict([
                    (
                        f"conv{i}",
                        nn.Sequential(
                            nn.Conv2d(
                                in_channels=prev_num_channels,
                                out_channels=nc,
                                kernel_size=ks,
                                stride=s,
                                padding=pd,
                            ),
                            nn.GroupNorm(num_groups=gn, num_channels=nc),
                            nn.LeakyReLU(0.2),
                        ),
                    ),
                    (
                        f"sub_conv{i}",
                        nn.Sequential(
                            nn.Conv2d(
                                in_channels=nc,
                                out_channels=sc_nc * sc_f**2,
                                stride=sc_s,
                                kernel_size=1,
                            ),
                            nn.PixelShuffle(sc_f),
                            nn.GroupNorm(num_groups=sc_gn, num_channels=sc_nc),
                            nn.LeakyReLU(0.2),
                        ),
                    ),
                ]))
            prev_num_channels = sc_nc
        self.conv_layers = nn.Sequential(conv_layers)
        self.out_proj = nn.Sequential(
            nn.Conv2d(
                in_channels=prev_num_channels,
                out_channels=out_proj_nc,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            # nn.GroupNorm(num_groups=out_proj_nc // 4, num_channels=out_proj_nc),
            nn.InstanceNorm2d(num_features=out_proj_nc),
            nn.LeakyReLU(0.2),
            nn.Conv2d(
                in_channels=out_proj_nc,
                out_channels=4,
                kernel_size=1,
            ),
        )
        self.apply(self.weights_init)
        self.out_proj[-1].weight.data.normal_()
Exemple #59
0
 def __init__(self, nc, ndf, nout):
     super(GAN_D, self).__init__()
     self.nout = nout
     self.main = nn.Sequential()
     self.layers = odict([
         #block 0
         #input: (batch x nc x 1024 x 1024)
         ('conv_0',
          nn.Conv2d(nc, ndf * 8, 4, stride=2, padding=1, bias=False)),
         ('act_0', nn.LeakyReLU(0.2, inplace=True)),
         #block 1
         #input: (batch x nc x 512 x 512)
         ('conv_1',
          nn.Conv2d(ndf * 8, ndf * 8, 4, stride=2, padding=1, bias=False)),
         ('bn_1', nn.BatchNorm2d(ndf * 8)),
         ('act_1', nn.LeakyReLU(0.2, inplace=True)),
         #block 2
         #input: (batch x nc x 256 x 256)
         ('conv_2',
          nn.Conv2d(ndf * 8, ndf * 8, 4, stride=2, padding=1, bias=False)),
         ('bn_2', nn.BatchNorm2d(ndf * 8)),
         ('act_2', nn.LeakyReLU(0.2, inplace=True)),
         #block 3
         #input: (batch x nc x 128 x 128)
         ('conv_3',
          nn.Conv2d(ndf * 8, ndf * 8, 4, stride=2, padding=1, bias=False)),
         ('bn_3', nn.BatchNorm2d(ndf * 8)),
         ('act_3', nn.LeakyReLU(0.2, inplace=True)),
         #block 4
         #input: (batch x nc x 64 x 64)
         ('conv_4',
          nn.Conv2d(ndf * 8, ndf * 16, 4, stride=2, padding=1, bias=False)),
         ('bn_4', nn.BatchNorm2d(ndf * 16)),
         ('act_4', nn.LeakyReLU(0.2, inplace=True)),
         #block 5
         #input: (batch x nc x 32 x 32)
         ('conv_5',
          nn.Conv2d(ndf * 16, ndf * 32, 4, stride=2, padding=1,
                    bias=False)),
         ('bn_5', nn.BatchNorm2d(ndf * 32)),
         ('act_5', nn.LeakyReLU(0.2, inplace=True)),
         #block 6
         #input: (batch x nc x 16 x 16)
         ('conv_6',
          nn.Conv2d(ndf * 32, ndf * 64, 4, stride=2, padding=1,
                    bias=False)),
         ('bn_6', nn.BatchNorm2d(ndf * 64)),
         ('act_6', nn.LeakyReLU(0.2, inplace=True)),
         #block 7
         #input: (batch x nc x 8 x 8)
         ('conv_7',
          nn.Conv2d(ndf * 64, ndf * 64, 4, stride=2, padding=1,
                    bias=False)),
         ('bn_7', nn.BatchNorm2d(ndf * 64)),
         ('act_7', nn.LeakyReLU(0.2, inplace=True)),
         #block 8
         #input: (batch x nc x 4 x 4)
         ('conv_8',
          nn.Conv2d(ndf * 64, ndf * 64, 4, stride=2, padding=1,
                    bias=False)),
         ('bn_8', nn.BatchNorm2d(ndf * 64)),
         ('act_8', nn.LeakyReLU()),
         #block9
         #input: (batch x nc x 2 x 2)
         ('conv_9',
          nn.Conv2d(ndf * 64, nout, 2, stride=2, padding=0, bias=False)),
         ('act_9', nn.LeakyReLU()),
     ])
Exemple #60
0
    'flt': '',
    'bin': '',
    'imf': 1,
    'pls': '',
    'lnm': '',
    'lns': '',
}

###########################################################
# MESA Isochrones
# http://waps.cfa.harvard.edu/MIST/iso_form.php

# survey system
dict_output = odict([
    ('des', 'DECam'),
    ('sdss', 'SDSSugriz'),
    ('ps1', 'PanSTARRS'),
])

mesa_defaults = {
    'version': '1.0',
    'v_div_vcrit': 'vvcrit0.4',
    'age_scale': 'linear',
    'age_type': 'single',
    'age_value': 10e9,  # yr if scale='linear'; log10(yr) if scale='log10'
    'age_range_low': '',
    'age_range_high': '',
    'age_range_delta': '',
    'age_list': '',
    'FeH_value': -3.0,
    'theory_output': 'basic',