def get_dump_object(self, obj):
    """Build the serialized mapping for ``obj``.

    The primary key is added only when natural primary keys are not in
    use (or the object cannot produce a natural key); the rest of the
    field data comes from ``self._current``.
    """
    data = OrderedDict()
    uses_natural_pk = self.use_natural_primary_keys and hasattr(obj, 'natural_key')
    if not uses_natural_pk:
        data["pk"] = force_text(obj._get_pk_val(), strings_only=True)
    data.update(self._current)
    # TODO: handling of methods (translated from the Russian original)
    # for field in concrete_model._meta.many_to_many:
    #     if field.serialize:
    #         if self.selected_fields is None or field.attname in self.selected_fields:
    #             self.handle_m2m_field(obj, field)
    return data
def test_object_pairs_hook(self): s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}' p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4), ("qrt", 5), ("pad", 6), ("hoy", 7)] self.assertEqual(json.loads(s), eval(s)) self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p) self.assertEqual(json.load(StringIO(s), object_pairs_hook=lambda x: x), p) od = json.loads(s, object_pairs_hook=OrderedDict) self.assertEqual(od, OrderedDict(p)) self.assertEqual(type(od), OrderedDict) # the object_pairs_hook takes priority over the object_hook self.assertEqual( json.loads(s, object_pairs_hook=OrderedDict, object_hook=lambda x: None), OrderedDict(p))
def render_notification_block(config, env_key, skip_envs=None):
    """Render the Datadog notification block template for ``env_key``.

    When ``skip_envs`` is given, those environments are removed from a
    sorted copy of ``config.env_notifications`` so the original mapping
    is left untouched.
    """
    environment = get_datadog_jinja_environment()
    template = environment.get_template('notification_block.j2')
    notify_envs = config.env_notifications
    if skip_envs:
        # copy but maintain ordering
        notify_envs = OrderedDict(sorted(config.env_notifications.items()))
        for skipped in skip_envs:
            notify_envs.pop(skipped, None)
    return template.render(
        env_key=env_key,
        catchall_alert_channel=config.catchall_alert_channel,
        envs=notify_envs,
        start_block=BLOCK.format(env_key=env_key, start_or_end='START'),
        end_block=BLOCK.format(env_key=env_key, start_or_end='END'),
    )
def load_po(self, path, json_root_key, lang):
    """
    Load content of Gettext PO file (from "path/lang/json_root_key.po")
    into already loaded JSON content.

    :param path: Root directory
    :param json_root_key: JSON key from RcgJsonKeys class
    :param lang: Language. Should be in RcgLanguages class
    :return: None
    """
    if len(self.json_content[json_root_key.value]) == 0:
        logging.error(
            "ERROR: {} JSON entry is empty! Forgot to load_json()?".format(
                json_root_key.value))
        return
    # NOTE(review): the original built an unused `temp_json` OrderedDict
    # here; it was never read, so it has been removed as dead code.
    # Resolve the internal language key from the supplied ISO code.
    language = next(name.value["key"] for name in RcgLanguages
                    if name.value["iso_code"] == lang)
    po_file = join(path, lang, json_root_key.value + ".po")
    if exists(po_file):
        po = pofile(po_file)
        for entry in po:
            # Match the PO entry to its JSON record via msgctxt.
            # NOTE(review): raises StopIteration if no record matches —
            # presumably the PO file is always generated from this JSON;
            # confirm before hardening.
            json_entry = next(
                item for item in self.json_content[json_root_key.value]
                if item[LANG_KEY] == entry.msgctxt)
            if not entry.obsolete and entry.translated(
            ) and "fuzzy" not in entry.flags:
                json_entry[language] = entry.msgstr
            else:
                # Add entry if there no entry at all
                if language not in json_entry:
                    json_entry[language] = entry.msgid
    else:
        logging.warning(
            "ERROR: '{}' is not exists! Skipping.".format(po_file))
    return
def to_dict(self):
    """Return this record as an OrderedDict keyed by export column names."""
    columns = (
        ("NO", self.number),
        ("DIST_CODE", self.dist_code),
        ("DISTRICT_NAME", self.district_name),
        ("EA_CODE", self.ea_code),
        ("EA_NAME", self.ea_name),
        ("SUB_COUNTY_CODE", self.sub_county_code),
        ("SUB_COUNTY_NAME", self.sub_county_name),
        ("PARISH_CODE", self.parish_code),
        ("PARISH_NAME", self.parish_name),
        ("PS_CODE", self.ps_code),
        ("POLLING_STATION_NAME", self.ps_name),
    )
    return OrderedDict(columns)
def setUp(self):
    """Build the sample bundle document and the four JSON renderings
    the dump tests expect."""
    super(JSONDumpTests, self).setUp()
    self.doc = OrderedDict([
        ("test_runs", []),
        ("format", "Dashboard Bundle Format 1.0"),
    ])
    # Pretty-printed, insertion order preserved.
    self.expected_readable_text = (
        '{\n "test_runs": [], \n "format": '
        '"Dashboard Bundle Format 1.0"\n}')
    # Pretty-printed with keys sorted alphabetically.
    self.expected_readable_sorted_text = (
        '{\n "format": "Dashboard Bundle Format 1.0",'
        ' \n "test_runs": []\n}')
    # Compact (no whitespace), insertion order preserved.
    self.expected_compact_text = (
        '{"test_runs":[],"format":"Dashboard Bundle Format 1.0"}')
    # Compact with keys sorted alphabetically.
    self.expected_compact_sorted_text = (
        '{"format":"Dashboard Bundle Format 1.0","test_runs":[]}')
# NOTE(review): Python 2 web2py/climate code (uses `print sql`, `iteritems`).
# The source arrived with all indentation collapsed onto single physical
# lines, so the class cannot be safely reformatted without risking a
# behavior change; lines are preserved verbatim with review notes added.
# SampleTable maps (parameter_name, sample_type_code) and display names to
# per-dataset postgres tables named climate_sample_table_<id>, and issues
# raw SQL through db.executesql for create/drop/insert/select.
class SampleTable(object): # Samples always have places and time (periods) # This format is used for daily data and monthly aggregated data. # Performance matters, and we have lots of data, # so unnecessary bytes are shaved as follows: # 1. Sample tables don't need an id - the time and place is the key # 2. The smallest interval is one day, so time_period as smallint (65536) # instead of int, allows a 179 year range, from 1950 to 2129. # Normally we'll be dealing with months however, where this is # even less of an issue. # 3. The value field type can be real, int, smallint, decimal etc. # Double is overkill for climate data. # Take care with decimal though - calculations may be slower. # These tables are not web2py tables as we don't want web2py messing with # them. The database IO is done directly to postgres for speed. # We don't want web2py messing with or complaining about the schemas. # It is likely we will need spatial database extensions i.e. PostGIS. # May be better to cluster places by region.
# Class-level registries (__objects by (name, type code), __names by display
# name, __by_ids by numeric id) plus static lookup helpers and __init__,
# which validates units/sample_type arguments and registers the instance.
__date_mapper = { "daily": daily, "monthly": monthly } __objects = {} __names = OrderedDict() @staticmethod def with_name(name): return SampleTable.__names[name] __by_ids = {} @staticmethod def with_id(id): SampleTable_by_ids = SampleTable.__by_ids return SampleTable_by_ids[id] @staticmethod def name_exists(name, error): if name in SampleTable.__names: return True else: error( "Available data sets are: %s" % SampleTable.__names.keys() ) return False @staticmethod def matching( parameter_name, sample_type_code ): try: return SampleTable.__objects[(parameter_name, sample_type_code)] except KeyError: pass #print SampleTable.__objects.keys() @staticmethod def add_to_client_config_dict(config_dict): data_type_option_names = [] for SampleTableType in sample_table_types: data_type_option_names.append(SampleTableType.__name__) parameter_names = [] for name, sample_table in SampleTable.__names.iteritems(): if sample_table.date_mapping_name == "monthly": parameter_names.append(name) config_dict.update( data_type_option_names = data_type_option_names, parameter_names = parameter_names ) def __init__( sample_table, db, name, # please change to parameter_name date_mapping_name, field_type, units_name, grid_size, sample_type = None, sample_type_code = None, id = None ): parameter_name = name assert units_name in units_in_out.keys(), \ "units must be one of %s" % units_in_out.keys() assert sample_type is None or sample_type in sample_table_types assert (sample_type is not None) ^ (sample_type_code is not None), \ "either parameters sample_type or sample_type_code must be set" sample_table_type = sample_type or sample_table_types_by_code[sample_type_code] if id is not None: if id in SampleTable.__by_ids: # other code shouldn't be creating SampleTables that already # exist. Or, worse, different ones with the same id. raise Exception( "SampleTable %i already exists. " "Use SampleTable.with_id(%i) instead."
# Remainder of __init__ (attribute assignment and registry insertion),
# __repr__/__str__, the table_name/set_id helpers, find() (query the
# climate_sample_table_spec for an existing table), and the start of
# create(), which inserts a spec row and issues CREATE TABLE SQL.
% (id, id) ) #return SampleTable.__by_ids[id] else: sample_table.set_id(id) SampleTable.__by_ids[id] = sample_table sample_table.type = sample_table_type sample_table.units_name = units_name sample_table.parameter_name = parameter_name sample_table.date_mapping_name = date_mapping_name sample_table.date_mapper = SampleTable.__date_mapper[date_mapping_name] sample_table.field_type = field_type sample_table.grid_size = grid_size sample_table.db = db SampleTable.__objects[ (parameter_name, sample_table.type.code) ] = sample_table SampleTable.__names["%s %s" % ( sample_table.type.__name__, parameter_name )] = sample_table def __repr__(sample_table): return '%s %s' % ( sample_table.type.__name__, sample_table.parameter_name ) def __str__(sample_table): return '"%s"' % repr(sample_table) @staticmethod def table_name(id): return "climate_sample_table_%i" % id def set_id(sample_table,id): sample_table.id = id sample_table.table_name = SampleTable.table_name(id) def find( sample_table, found, not_found ): db = sample_table.db existing_table_query = db( (db.climate_sample_table_spec.name == sample_table.parameter_name) & (db.climate_sample_table_spec.sample_type_code == sample_table.type.code) ) existing_table = existing_table_query.select().first() if existing_table is None: not_found() else: found( existing_table_query, SampleTable.table_name(existing_table.id), ) def create(sample_table, use_table_name): def create_table(): db = sample_table.db sample_table.set_id( db.climate_sample_table_spec.insert( sample_type_code = sample_table.type.code, name = sample_table.parameter_name, units = sample_table.units_name, field_type = sample_table.field_type, date_mapping = sample_table.date_mapping_name, grid_size = sample_table.grid_size ) ) db.executesql( """ CREATE TABLE %(table_name)s ( place_id integer NOT NULL, time_period smallint NOT NULL, value %(field_type)s NOT NULL, CONSTRAINT %(table_name)s_primary_key PRIMARY KEY (place_id, time_period), CONSTRAINT
# Rest of create() plus create_indices/drop/clear/insert_values and the
# start of pull_real_time_data/csv_data.
# NOTE(review): insert_values uses a bare `except:` around executesql and
# re-raises after printing — flagged but preserved.
# NOTE(review): in pull_real_time_data the concatenated SQL literals
# "GROUP BY station_id, obstime" "ORDER BY ..." are missing a separating
# space, producing "obstimeORDER BY" — likely a latent bug; confirm
# against the original repository before fixing.
%(table_name)s_place_id_fkey FOREIGN KEY (place_id) REFERENCES climate_place (id) MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE ); """ % sample_table.__dict__ ) use_table_name(sample_table.table_name) def complain_that_table_already_exists( query, existing_table_name ): raise Exception( "Table for %s %s already exists as '%s'" % ( sample_table.type.__name__, sample_table.parameter_name, existing_table_name ) ) return sample_table.find( not_found = create_table, found = complain_that_table_already_exists ) def create_indices(sample_table): db = sample_table.db for field in ( "time_period", "place_id", "value" ): db.executesql( "CREATE INDEX %(table_name)s_%(field)s__idx " "on %(table_name)s(%(field)s);" % dict( sample_table.__dict__, field = field ) ) use_table_name(sample_table.table_name) def drop(sample_table, use_table_name): db = sample_table.db def complain_that_table_does_not_exist(): raise Exception( "%s %s table not found" % ( sample_table.sample_type_name, sample_table.parameter_name, ) ) def delete_table( existing_table_query, existing_table_name, ): existing_table_query.delete() db.executesql( "DROP TABLE %s;" % existing_table_name ) db.commit() use_table_name(existing_table_name) return sample_table.find( not_found = complain_that_table_does_not_exist, found = delete_table ) def clear(sample_table): sample_table.db.executesql( "TRUNCATE TABLE %s;" % sample_table.table_name ) def insert_values(sample_table, values): sql = "INSERT INTO %s (time_period, place_id, value) VALUES %s;" % ( sample_table.table_name, ",".join(values) ) try: sample_table.db.executesql(sql) except: print sql raise def pull_real_time_data(sample_table): import_sql = ( "SELECT AVG(value), station_id, obstime " "FROM weather_data_nepal " "WHERE parameter = 'T' " "GROUP BY station_id, obstime" "ORDER BY station_id, obstime;" ) sample_table.cldb.executesql( import_sql ) def csv_data( sample_table, place_id, date_from, date_to ): sample_table_id = sample_table.id date_mapper =
# Rest of csv_data (builds a "date,<units>" CSV from a range query) and
# get_available_years (distinct years via a GROUP BY subquery).
# NOTE(review): in csv_data the literals "...time_period <= %(end_date_number)i"
# "ORDER BY ..." are also missing a separating space — same suspected bug
# as above. Both methods reference module-level `db`, `monthly`, `daily`,
# `start_year`, `start_month_0_indexed` defined elsewhere in the file.
sample_table.date_mapper start_date_number = date_mapper.date_to_time_period(date_from) end_date_number = date_mapper.date_to_time_period(date_to) data = [ "date,"+sample_table.units_name ] for record in db.executesql( "SELECT * " "FROM climate_sample_table_%(sample_table_id)i " "WHERE time_period >= %(start_date_number)i " "AND place_id = %(place_id)i " "AND time_period <= %(end_date_number)i" "ORDER BY time_period ASC;" % locals() ): place_id, time_period, value = record date_format = { monthly: "%Y-%m", daily: "%Y-%m-%d" }[date_mapper] data.append( ",".join(( date_mapper.to_date(time_period).strftime(date_format), str(value) )) ) data.append("") return "\n".join(data) def get_available_years( sample_table ): years = [] for (year,) in db.executesql( "SELECT sub.year FROM (" "SELECT (((time_period + %(start_month_0_indexed)i) / 12) + %(start_year)i)" " AS year " "FROM climate_sample_table_%(sample_table_id)i " ") as sub GROUP BY sub.year;" % dict( start_year = start_year, start_month_0_indexed = start_month_0_indexed, sample_table_id = sample_table.id ) ): years.append(year) return years
# NOTE(review): this chunk appears out of order relative to the class above —
# it holds module-level setup (sample type marker classes, the type-code
# registry, the web2py `db` handle) plus a duplicate, truncated copy of the
# `class SampleTable` header, and opens with a stray `)` that belongs to a
# statement outside this view. Preserved verbatim; reassemble the original
# file ordering before attempting any reformat.
# The three marker classes carry a one-letter `code` used as the registry
# key; __name__ is overridden on two of them to give human-readable labels.
) class Observed(object): code = "O" Observed.__name__ = "Observed Station" class Gridded(object): code = "G" Gridded.__name__ = "Observed Gridded" class Projected(object): code = "P" from simplejson import OrderedDict sample_table_types = (Observed, Gridded, Projected) sample_table_types_by_code = OrderedDict() for SampleTableType in sample_table_types: sample_table_types_by_code[SampleTableType.code] = SampleTableType from gluon import current db = current.db class SampleTable(object): # Samples always have places and time (periods) # This format is used for daily data and monthly aggregated data. # Performance matters, and we have lots of data, # so unnecessary bytes are shaved as follows: # 1. Sample tables don't need an id - the time and place is the key
def get_config(config_path):
    """Load the YAML configuration at ``config_path`` into a Config.

    ``env_notifications`` is normalised into an OrderedDict sorted by
    environment name so downstream rendering is deterministic.
    """
    with open(config_path, 'r', encoding='utf-8') as fh:
        config = Config(yaml.safe_load(fh))
    sorted_envs = sorted(config.env_notifications.items())
    config.env_notifications = OrderedDict(sorted_envs)
    return config
def stop_listening(self):
    """Unsubscribe from the detector and collect the finished shelves.

    :return: OrderedDict built from the ``get_shelf()`` pair of every
        shelf that reports itself complete.
    """
    self.detector_sub.unregister()
    # self.publish_as_marker()
    completed = (shelf.get_shelf() for shelf in self.shelves
                 if shelf.is_complete())
    return OrderedDict(completed)