Example #1
    def read(self, database='project'):
        """
		Read a cons_practice.lum text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
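        # the first two lines are header rows (file title and column names), so data parsing starts at line 3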
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 4, 'cons_practice.lum')

                d = {
                    'name': val[0].lower(),
                    'usle_p': val[1],
                    'slp_len_max': val[2],
                    'description': val[3]
                }
                data.append(d)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Cons_prac_lum, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Cons_prac_lum,
                               data)
Example #2
    def read(self, database='project'):
        """
		Read a cntable.lum text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        cns = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 8, 'cntable.lum')

                cn = {
                    'name': val[0].lower(),
                    'cn_a': val[1],
                    'cn_b': val[2],
                    'cn_c': val[3],
                    'cn_d': val[4],
                    'description': val[5],
                    'treat': val[6],
                    'cond_cov': val[7]
                }
                cns.append(cn)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Cntable_lum, cns)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Cntable_lum, cns)
Example #3
    def read(self, database='project'):
        """
		Read a ovn_table.lum text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 5, 'ovn_table.lum')

                d = {
                    'name': val[0].lower(),
                    'ovn_mean': val[1],
                    'ovn_min': val[2],
                    'ovn_max': val[3],
                    'description': val[4]
                }
                data.append(d)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Ovn_table_lum, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Ovn_table_lum,
                               data)
Example #4
	def read(self, database='project'):
		"""
		Read a chem_app.ops text file into the database.
		:param database: project or datasets
		:return:
		"""
		file = open(self.file_name, "r")

		i = 1
		data = []
		for line in file:
			if i > 2:
				val = line.split()
				self.check_cols(val, 9, 'chem_app.ops')

				d = {
					'name': val[0].lower(),
					'chem_form': val[1],
					'app_typ': val[2],
					'app_eff': val[3],
					'foliar_eff': val[4],
					'inject_dp': val[5],
					'surf_frac': val[6],
					'drift_pot': val[7],
					'aerial_unif': val[8]
				}
				data.append(d)
			i += 1

		if database == 'project':
			db_lib.bulk_insert(project_base.db, db.Chem_app_ops, data)
		else:
			db_lib.bulk_insert(datasets_base.db, db_datasets.Chem_app_ops, data)
Example #5
	def read(self, database='project'):
		"""
		Read a sweep.ops text file into the database.
		:param database: project or datasets
		:return:
		"""
		file = open(self.file_name, "r")

		i = 1
		data = []
		for line in file:
			if i > 2:
				val = line.split()
				self.check_cols(val, 3, 'sweep.ops')

				d = {
					'name': val[0].lower(),
					'swp_eff': val[1],
					'frac_curb': val[2]
				}
				data.append(d)
			i += 1

		if database == 'project':
			db_lib.bulk_insert(project_base.db, db.Sweep_ops, data)
		else:
			db_lib.bulk_insert(datasets_base.db, db_datasets.Sweep_ops, data)
Example #6
    def read(self, database='project'):
        """
		Read a filterstrip.str text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 5, 'filterstrip.str')

                d = {
                    'name': val[0].lower(),
                    'flag': 0,
                    'fld_vfs': val[1],
                    'con_vfs': val[2],
                    'cha_q': val[3],
                    'description': val[4]
                }
                data.append(d)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Filterstrip_str, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Filterstrip_str,
                               data)
Example #7
	def read(self, database='project'):
		"""
		Read a harv.ops text file into the database.
		:param database: project or datasets
		:return:
		"""
		file = open(self.file_name, "r")

		i = 1
		data = []
		for line in file:
			if i > 2:
				val = line.split()
				self.check_cols(val, 5, 'harv.ops')

				d = {
					'name': val[0].lower(),
					'harv_typ': val[1],
					'harv_idx': val[2],
					'harv_eff': val[3],
					'harv_bm_min': val[4]
				}
				data.append(d)
			i += 1

		if database == 'project':
			db_lib.bulk_insert(project_base.db, db.Harv_ops, data)
		else:
			db_lib.bulk_insert(datasets_base.db, db_datasets.Harv_ops, data)
Example #8
    def read(self, database='project'):
        """
		Read a tiledrain.str text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 9, 'tiledrain.str')

                d = {
                    'name': val[0].lower(),
                    'dp': val[1],
                    't_fc': val[2],
                    'lag': val[3],
                    'rad': val[4],
                    'dist': val[5],
                    'drain': val[6],
                    'pump': val[7],
                    'lat_ksat': val[8]
                }
                data.append(d)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Tiledrain_str, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Tiledrain_str,
                               data)
Example #9
    def read(self, database='project'):
        """
		Read a grassedww.str text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 8, 'grassedww.str')

                d = {
                    'name': val[0].lower(),
                    'flag': 0,
                    'mann': val[1],
                    'sed_co': val[2],
                    'dp': val[3],
                    'wd': val[4],
                    'len': val[5],
                    'slp': val[6],
                    'description': val[7]
                }
                data.append(d)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Grassedww_str, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Grassedww_str,
                               data)
Example #10
def save_constituents_ini(project_db,
                          ini_table,
                          ini_item_table,
                          rel_col_name,
                          row1='plant',
                          row2='soil'):
    SetupProjectDatabase.init(project_db)
    parser = reqparse.RequestParser()
    parser.add_argument('items', type=list, location='json')
    args = parser.parse_args(strict=False)

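    # replace-all semantics: clear the item rows, then the parent table, before re-inserting from the request payload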
    ini_item_table.delete().execute()
    ini_table.delete().execute()

    for item in args['items']:
        m = ini_table(name=item['name'])
        m.save()

        rows = []
        for row in item['rows']:
            rows.append({
                rel_col_name: m.id,
                'name_id': row['name_id'],
                row1: row[row1],
                row2: row[row2]
            })

        db_lib.bulk_insert(project_base.db, ini_item_table, rows)
Example #11
    def read_const_data(self):
        with open(self.file_name, mode='r') as csv_file:
            dialect = csv.Sniffer().sniff(csv_file.readline())
            csv_file.seek(0)
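            # a non-comma delimiter suggests commas are decimal separators, so they are converted to periods below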
            replace_commas = dialect is not None and dialect.delimiter != ','
            csv_reader = csv.DictReader(csv_file, dialect=dialect)
            rows = []
            for row in csv_reader:
                rec = db.Recall_rec.get_or_none(
                    db.Recall_rec.name == row['name'])
                if rec is not None:
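                    # flag the record as constant point-source data (rec_typ 4; see recTypOptions in the next example) and drop any old rows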
                    db.Recall_rec.update(rec_typ=4).where(
                        db.Recall_rec.id == rec.id).execute()
                    db.Recall_dat.delete().where(
                        db.Recall_dat.recall_rec_id == rec.id).execute()

                    if replace_commas:
                        for key in row:
                            row[key] = row[key].replace(',', '.', 1)

                    row['recall_rec_id'] = rec.id
                    row['yr'] = 0
                    row['jday'] = 0
                    row['mo'] = 0
                    row['day_mo'] = 0
                    row['ob_typ'] = 'pt_cnst'
                    row['ob_name'] = row['name']
                    row.pop('name', None)
                    rows.append(row)

            db_lib.bulk_insert(project_base.db, db.Recall_dat, rows)
Example #12
    def read_data(self, recall_rec_id, delete_existing, rec_typ):
        with open(self.file_name, mode='r') as csv_file:
            dialect = csv.Sniffer().sniff(csv_file.readline())
            csv_file.seek(0)
            replace_commas = dialect is not None and dialect.delimiter != ','
            csv_reader = csv.DictReader(csv_file, dialect=dialect)  # pass the sniffed dialect so non-comma delimiters parse correctly
            rows = []
            """
			recTypOptions: [
				{ value: 1, text: 'Daily' },
				{ value: 2, text: 'Monthly' },
				{ value: 3, text: 'Yearly' },
				{ value: 4, text: 'Constant' }
			]
			"""
            for row in csv_reader:
                if replace_commas:
                    for key in row:
                        row[key] = row[key].replace(',', '.', 1)
                row['recall_rec_id'] = recall_rec_id
                rows.append(row)

            if delete_existing:
                db.Recall_dat.delete().where(
                    db.Recall_dat.recall_rec_id == recall_rec_id).execute()

            db_lib.bulk_insert(project_base.db, db.Recall_dat, rows)
            db.Recall_rec.update(rec_typ=rec_typ).where(
                db.Recall_rec.id == recall_rec_id).execute()
Example #13
    def read(self, database='project'):
        """
		Read a cal_parms.cal text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        values = []
        for line in file:
            if i > 3:
                val = line.split()
                self.check_cols(val, 5, 'cal_parms')

                v = {
                    'name': val[0].lower(),
                    'obj_typ': val[1],
                    'abs_min': val[2],
                    'abs_max': val[3],
                    'units': val[4] if val[4] != 'null' else None
                }
                values.append(v)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, project_db.Cal_parms_cal,
                               values)
        else:
            db_lib.bulk_insert(datasets_base.db, datasets_db.Cal_parms_cal,
                               values)
Example #14
    def read(self, database='project'):
        """
		Read a bmpuser.str text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 8, 'bmpuser.str')

                d = {
                    'name': val[0].lower(),
                    'flag': val[1],
                    'sed_eff': val[2],
                    'ptlp_eff': val[3],
                    'solp_eff': val[4],
                    'ptln_eff': val[5],
                    'soln_eff': val[6],
                    'bact_eff': val[7]
                }
                data.append(d)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Bmpuser_str, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Bmpuser_str, data)
Example #15
    def put(self, project_db, id):
        try:
            SetupProjectDatabase.init(project_db)
            args = get_mgt_args()

            m = Management_sch.get(Management_sch.id == id)
            m.name = args['name']
            m.save()

            new_auto = []
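            # resolve each auto-operation name to its decision table entry in lum.dtl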
            for a in args['auto_ops']:
                try:
                    dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl')
                                         & (D_table_dtl.name == a))
                    new_auto.append({
                        'management_sch_id': m.id,
                        'd_table_id': dt.id
                    })
                except D_table_dtl.DoesNotExist:
                    abort(
                        404,
                        message='Decision table {name} does not exist'.format(
                            name=a))

            new_ops = []
            order = 1
            for o in args['operations']:
                new_ops.append({
                    'management_sch_id': m.id,
                    'op_typ': o['op_typ'],
                    'mon': o['mon'],
                    'day': o['day'],
                    'op_data1': o['op_data1'],
                    'op_data2': o['op_data2'],
                    'op_data3': o['op_data3'],
                    'order': o['order'],
                    'hu_sch': o['hu_sch']
                })
                order += 1

            Management_sch_auto.delete().where(
                Management_sch_auto.management_sch_id == m.id).execute()
            lib.bulk_insert(base.db, Management_sch_auto, new_auto)

            Management_sch_op.delete().where(
                Management_sch_op.management_sch_id == m.id).execute()
            lib.bulk_insert(base.db, Management_sch_op, new_ops)

            return 200
        except IntegrityError as e:
            abort(400, message='Management schedule name must be unique.')
        except Cons_prac_lum.DoesNotExist:
            abort(404,
                  message='Management schedule {id} does not exist'.format(
                      id=id))
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
Example #16
    def read(self, database='project'):
        """
		Read a septic.str text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 28, 'septic.str')

                d = {
                    'name': val[0].lower(),
                    'typ': val[1],
                    'yr': val[2],
                    'operation': val[3],
                    'residents': val[4],
                    'area': val[5],
                    't_fail': val[6],
                    'dp_bioz': val[7],
                    'thk_bioz': val[8],
                    'cha_dist': val[9],
                    'sep_dens': val[10],
                    'bm_dens': val[11],
                    'bod_decay': val[12],
                    'bod_conv': val[13],
                    'fc_lin': val[14],
                    'fc_exp': val[15],
                    'fecal_decay': val[16],
                    'tds_conv': val[17],
                    'mort': val[18],
                    'resp': val[19],
                    'slough1': val[20],
                    'slough2': val[21],
                    'nit': val[22],
                    'denit': val[23],
                    'p_sorp': val[24],
                    'p_sorp_max': val[25],
                    'solp_slp': val[26],
                    'solp_int': val[27]
                }
                data.append(d)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Septic_str, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Septic_str, data)
Example #17
	def read_data(self, recall_rec_id, delete_existing):
		with open(self.file_name, mode='r') as csv_file:
			dialect = csv.Sniffer().sniff(csv_file.readline())
			csv_file.seek(0)
			replace_commas = dialect is not None and dialect.delimiter != ','
			csv_reader = csv.DictReader(csv_file, dialect=dialect)  # pass the sniffed dialect so non-comma delimiters parse correctly
			rows = []
			for row in csv_reader:
				if replace_commas:
					for key in row:
						row[key] = row[key].replace(',', '.', 1)
				row['recall_rec_id'] = recall_rec_id
				rows.append(row)

			if delete_existing:
				db.Recall_dat.delete().where(db.Recall_dat.recall_rec_id == recall_rec_id).execute()

			db_lib.bulk_insert(project_base.db, db.Recall_dat, rows)
Example #18
	def put(self, project_db, id):
		table = change.Water_balance_sft
		item_table = change.Water_balance_sft_item
		description = 'Calibration'

		parser = reqparse.RequestParser()
		parser.add_argument('id', type=int, required=False, location='json')
		parser.add_argument('name', type=str, required=True, location='json')
		parser.add_argument('items', type=list, required=False, location='json')
		args = parser.parse_args(strict=False)

		try:
			SetupProjectDatabase.init(project_db)
			result = self.save_args(table, args, id=id)

			if args['items'] is not None:
				items = []
				for c in args['items']:
					items.append({
						'water_balance_sft_id': id,
						'name': c['name'],
						'surq_rto': c['surq_rto'],
						'latq_rto': c['latq_rto'],
						'perc_rto': c['perc_rto'],
						'et_rto': c['et_rto'],
						'tileq_rto': c['tileq_rto'],
						'pet': c['pet'],
						'sed': c['sed'],
						'orgn': c['orgn'],
						'orgp': c['orgp'],
						'no3': c['no3'],
						'solp': c['solp']
					})
				
				item_table.delete().where(item_table.water_balance_sft_id == id).execute()
				lib.bulk_insert(project_base.db, item_table, items)

			return 200
		except IntegrityError as e:
			abort(400, message='{item} save error. '.format(item=description) + str(e))
		except table.DoesNotExist:
			abort(404, message='{item} {id} does not exist'.format(item=description, id=id))
		except Exception as ex:
			abort(400, message="Unexpected error {ex}".format(ex=ex))
Example #19
    def create_weather_stations(
        self, start_prog, total_prog
    ):  # total_prog is the total progress percentage available for this method
        if self.__abort: return

        stations = []
        cursor = project_base.db.execute_sql(
            "select lat, lon from weather_file group by lat, lon")
        data = cursor.fetchall()
        records = len(data)
        i = 1
        for row in data:
            if self.__abort: return

            lat = row[0]
            lon = row[1]
            name = weather_sta_name(lat, lon)

            prog = round(i * total_prog / records) + start_prog
            # self.emit_progress(prog, "Creating weather station {name}...".format(name=name))

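            # only queue a new station if one with this name does not already exist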
            try:
                existing = Weather_sta_cli.get(Weather_sta_cli.name == name)
            except Weather_sta_cli.DoesNotExist:
                station = {
                    "name": name,
                    "wnd_dir": None,
                    "atmo_dep": None,
                    "lat": lat,
                    "lon": lon,
                }
                """
					"hmd": closest_lat_lon(project_base.db, "weather_file", lat, lon, "hmd"),
					"pcp": closest_lat_lon(project_base.db, "weather_file", lat, lon, "pcp"),
					"slr": closest_lat_lon(project_base.db, "weather_file", lat, lon, "slr"),
					"tmp": closest_lat_lon(project_base.db, "weather_file", lat, lon, "tmp"),
					"wnd": closest_lat_lon(project_base.db, "weather_file", lat, lon, "wnd")
					"""

                stations.append(station)
            i += 1

        db_lib.bulk_insert(project_base.db, Weather_sta_cli, stations)
        self.match_files_to_stations(45, 45)
Example #20
    def read(self, database='project'):
        """
		Read a graze.ops text file into the database.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        data = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 7, 'graze.ops')

                fert_name = val[1].strip().replace('-', '_')  # replace '-' with '_' due to a name mismatch in the files
                try:
                    fert = Fertilizer_frt.get(Fertilizer_frt.name == fert_name)

                    d = {
                        'name': val[0].lower(),
                        'fert': fert.id,
                        'bm_eat': val[2],
                        'bm_tramp': val[3],
                        'man_amt': val[4],
                        'grz_bm_min': val[5],
                        'description': val[6]
                    }
                    data.append(d)
                except Fertilizer_frt.DoesNotExist:
                    raise ValueError(
                        "Could not find matching fertilizer {fert_name} in database."
                        .format(fert_name=fert_name))

            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, db.Graze_ops, data)
        else:
            db_lib.bulk_insert(datasets_base.db, db_datasets.Graze_ops, data)
Example #21
    def create_weather_stations(
        self, start_prog, total_prog
    ):  # total_prog is the total progress percentage available for this method
        if self.__abort: return

        stations = []
        query = Weather_wgn_cli.select()
        records = query.count()
        i = 1
        for row in query:
            if self.__abort: return

            lat = row.lat
            lon = row.lon
            #name = "w{lat}{lon}".format(lat=abs(round(lat*1000)), lon=abs(round(lon*1000)))
            name = weather_sta_name(lat, lon)

            prog = round(i * total_prog / records) + start_prog
            # self.emit_progress(prog, "Creating weather station {name}...".format(name=name))

            try:
                existing = Weather_sta_cli.get(Weather_sta_cli.name == name)
            except Weather_sta_cli.DoesNotExist:
                station = {
                    "name": name,
                    "hmd": None,
                    "pcp": None,
                    "slr": None,
                    "tmp": None,
                    "wnd": None,
                    "wnd_dir": None,
                    "atmo_dep": None,
                    "lat": lat,
                    "lon": lon,
                    "wgn": row.id
                }

                stations.append(station)
            i += 1

        db_lib.bulk_insert(project_base.db, Weather_sta_cli, stations)
Example #22
	def post(self, project_db):
		try:
			args = get_mgt_args()

			m = Management_sch()
			m.name = args['name']
			m.save()

			new_auto = []
			for a in args['auto_ops']:
				try:
					dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl') & (D_table_dtl.name == a['name']))
					new_auto.append({'management_sch_id': m.id, 'd_table_id': dt.id, 'plant1': a['plant1'], 'plant2': a['plant2']})
				except D_table_dtl.DoesNotExist:
					abort(404, message='Decision table {name} does not exist'.format(name=a['name']))

			new_ops = []
			order = 1
			for o in args['operations']:
				new_ops.append({
					'management_sch_id': m.id,
					'op_typ': o['op_typ'],
					'mon': o['mon'],
					'day': o['day'],
					'op_data1': o['op_data1'],
					'op_data2': o['op_data2'],
					'op_data3': o['op_data3'],
					'order': o['order'],
					'hu_sch': o['hu_sch']
				})
				order += 1

			lib.bulk_insert(base.db, Management_sch_auto, new_auto)
			lib.bulk_insert(base.db, Management_sch_op, new_ops)

			return 201
		except IntegrityError as e:
			abort(400, message='Management schedule name must be unique.')
		except Exception as ex:
			abort(400, message="Unexpected error {ex}".format(ex=ex))
Example #23
    def read(self, database='project'):
        """
		Read a septic.sep text file into the database.
		NOTE: CURRENTLY THERE IS AN EXTRA NUMERIC COLUMN BEFORE THE DESCRIPTION.
		:param database: project or datasets
		:return:
		"""
        file = open(self.file_name, "r")

        i = 1
        septics = []
        for line in file:
            if i > 2:
                val = line.split()
                self.check_cols(val, 13, 'septic')

                sep = {
                    'name': val[0].lower(),
                    'q_rate': val[1],
                    'bod': val[2],
                    'tss': val[3],
                    'nh4_n': val[4],
                    'no3_n': val[5],
                    'no2_n': val[6],
                    'org_n': val[7],
                    'min_p': val[8],
                    'org_p': val[9],
                    'fcoli': val[10],
                    'description': val[12] if val[12] != 'null' else
                    None  # 12 index because extra column
                }
                septics.append(sep)
            i += 1

        if database == 'project':
            db_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep,
                               septics)
        else:
            db_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep,
                               septics)
Example #24
	def put(self, project_db, id):
		table = change.Ch_sed_budget_sft
		item_table = change.Ch_sed_budget_sft_item
		description = 'Calibration'

		parser = reqparse.RequestParser()
		parser.add_argument('id', type=int, required=False, location='json')
		parser.add_argument('name', type=str, required=True, location='json')
		parser.add_argument('items', type=list, required=False, location='json')
		args = parser.parse_args(strict=False)

		try:
			SetupProjectDatabase.init(project_db)
			result = self.save_args(table, args, id=id)

			if args['items'] is not None:
				items = []
				for c in args['items']:
					items.append({
						'ch_sed_budget_sft_id': id,
						'name': c['name'],
						'cha_wide': c['cha_wide'],
						'cha_dc_accr': c['cha_dc_accr'],
						'head_cut': c['head_cut'],
						'fp_accr': c['fp_accr']
					})
				
				item_table.delete().where(item_table.ch_sed_budget_sft_id == id).execute()
				lib.bulk_insert(project_base.db, item_table, items)

			return 200
		except IntegrityError as e:
			abort(400, message='{item} save error. '.format(item=description) + str(e))
		except table.DoesNotExist:
			abort(404, message='{item} {id} does not exist'.format(item=description, id=id))
		except Exception as ex:
			abort(400, message="Unexpected error {ex}".format(ex=ex))
Example #25
	def insert_var_range():
		file = open(source_data_path + 'var_range.csv', "r")
		
		i = 0
		items = []
		for line in file:
			if i > 0:
				val = line.split(',')
				items.append({
					'id': i,
					'table': val[0],
					'variable': val[1],
					'type': val[2],
					'min_value': val[3],
					'max_value': val[4],
					'default_value': val[5],
					'default_text': val[6],
					'units': val[7],
					'description': val[8]
				})
			
			i += 1
		
		db_lib.bulk_insert(base.db, definitions.Var_range, items)
Example #26
	def insert_var_range_option():
		file = open(source_data_path + 'var_range_option.csv', "r")
		
		i = 0
		items = []
		for line in file:
			if i > 0:
				val = line.split(',')

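				# match each option row to its parent Var_range record by table and variable name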
				vr = definitions.Var_range.get_or_none((definitions.Var_range.table == val[0]) & (definitions.Var_range.variable == val[1]))
				
				if vr is not None:
					items.append({
						'id': i,
						'var_range_id': vr.id,
						'value': val[2],
						'text': val[3],
					'text_only': val[4].strip() == '1',
						'text_value': None if val[5].strip() == '' else val[5].strip()
					})
			
			i += 1
		
		db_lib.bulk_insert(base.db, definitions.Var_range_option, items)
Example #27
    def add_wgn_stations_db(self, start_prog, total_prog):
        if self.__abort: return
        conn = sqlite3.connect(self.wgn_database)
        conn.row_factory = sqlite3.Row

        monthly_table = "{}_mon".format(self.wgn_table)

        if not db_lib.exists_table(conn, self.wgn_table):
            raise ValueError("Table {table} does not exist in {file}.".format(
                table=self.wgn_table, file=self.wgn_database))

        if not db_lib.exists_table(conn, monthly_table):
            raise ValueError("Table {table} does not exist in {file}.".format(
                table=monthly_table, file=self.wgn_database))

        if Rout_unit_con.select().count() > 0:
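            # restrict wgn candidates to the routing units' bounding box, padded by a 0.5 degree tolerance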
            coords = Rout_unit_con.select(
                fn.Min(Rout_unit_con.lat).alias("min_lat"),
                fn.Max(Rout_unit_con.lat).alias("max_lat"),
                fn.Min(Rout_unit_con.lon).alias("min_lon"),
                fn.Max(Rout_unit_con.lon).alias("max_lon")).get()

            query = "select * from {table_name} where lat between ? and ? and lon between ? and ? order by name".format(
                table_name=self.wgn_table)
            tol = 0.5
            cursor = conn.cursor().execute(
                query, (coords.min_lat - tol, coords.max_lat + tol,
                        coords.min_lon - tol, coords.max_lon + tol))
        elif Chandeg_con.select().count() > 0:
            coords = Chandeg_con.select(
                fn.Min(Chandeg_con.lat).alias("min_lat"),
                fn.Max(Chandeg_con.lat).alias("max_lat"),
                fn.Min(Chandeg_con.lon).alias("min_lon"),
                fn.Max(Chandeg_con.lon).alias("max_lon")).get()

            query = "select * from {table_name} where lat between ? and ? and lon between ? and ? order by name".format(
                table_name=self.wgn_table)
            tol = 0.5
            cursor = conn.cursor().execute(
                query, (coords.min_lat - tol, coords.max_lat + tol,
                        coords.min_lon - tol, coords.max_lon + tol))
        else:
            query = "select * from {table_name} order by name".format(
                table_name=self.wgn_table)
            cursor = conn.cursor().execute(query)

        wgns = []
        ids = []

        data = cursor.fetchall()
        records = len(data)
        #print(records)

        i = 1
        print('\t - Preparing weather generator')
        for row in data:
            if self.__abort: return

            try:
                existing = Weather_wgn_cli.get(
                    Weather_wgn_cli.name == row['name'])
            except Weather_wgn_cli.DoesNotExist:
                prog = round(i * (total_prog / 2) / records) + start_prog
                # self.emit_progress(prog, "Preparing weather generator {name}...".format(name=row['name']))
                i += 1

                ids.append(row['id'])
                wgn = {
                    "id": row['id'],
                    "name": row['name'],
                    "lat": row['lat'],
                    "lon": row['lon'],
                    "elev": row['elev'],
                    "rain_yrs": row['rain_yrs']
                }
                wgns.append(wgn)

        prog = start_prog if records < 1 else round(i * (total_prog / 2) /
                                                    records) + start_prog
        # self.emit_progress(prog, "Inserting {total} weather generators...".format(total=len(ids)))
        db_lib.bulk_insert(project_base.db, Weather_wgn_cli, wgns)

        # Chunk the id array so we don't hit the SQLite parameter limit!
        max_length = 999
        id_chunks = [
            ids[i:i + max_length] for i in range(0, len(ids), max_length)
        ]

        i = 1
        start_prog = start_prog + (total_prog / 2)

        mon_count_query = "select count(*) from {table_name}".format(
            table_name=monthly_table)
        total_mon_rows = conn.cursor().execute(mon_count_query).fetchone()[0]
        current_total = 0

        for chunk in id_chunks:
            monthly_values = []
            mon_query = "select * from {table_name} where wgn_id in ({ids})".format(
                table_name=monthly_table, ids=",".join('?' * len(chunk)))
            mon_cursor = conn.cursor().execute(mon_query, chunk)
            mon_data = mon_cursor.fetchall()
            mon_records = len(mon_data)
            i = 1

            for row in mon_data:
                if self.__abort: return

                if i == 1 or (i % 12 == 0):
                    prog = round(i *
                                 (total_prog / 2) / mon_records) + start_prog
                    # self.emit_progress(prog, "Preparing monthly values {i}/{total}...".format(i=i, total=mon_records))
                i += 1

                mon = {
                    "weather_wgn_cli": row['wgn_id'],
                    "month": row['month'],
                    "tmp_max_ave": row['tmp_max_ave'],
                    "tmp_min_ave": row['tmp_min_ave'],
                    "tmp_max_sd": row['tmp_max_sd'],
                    "tmp_min_sd": row['tmp_min_sd'],
                    "pcp_ave": row['pcp_ave'],
                    "pcp_sd": row['pcp_sd'],
                    "pcp_skew": row['pcp_skew'],
                    "wet_dry": row['wet_dry'],
                    "wet_wet": row['wet_wet'],
                    "pcp_days": row['pcp_days'],
                    "pcp_hhr": row['pcp_hhr'],
                    "slr_ave": row['slr_ave'],
                    "dew_ave": row['dew_ave'],
                    "wnd_ave": row['wnd_ave']
                }
                monthly_values.append(mon)

            prog = round(i * (total_prog / 2) / mon_records) + start_prog
            current_total = current_total + mon_records
            # self.emit_progress(prog, "Inserting monthly values {rec}/{total}...".format(rec=current_total, total=total_mon_rows))
            db_lib.bulk_insert(project_base.db, Weather_wgn_cli_mon,
                               monthly_values)
Example #28
    def add_weather_files_type(self, source_file, weather_type, prog):
        start_date = None
        end_date = None
        starts = []
        ends = []
        if os.path.exists(source_file):
            # self.emit_progress(prog, "Inserting {type} files and coordinates...".format(type=weather_type))
            weather_files = []
            dir = os.path.dirname(source_file)
            with open(source_file, "r") as source_data:
                i = 0
                for line in source_data:
                    if self.__abort:
                        break

                    if i > 1:
                        station_name = line.strip('\n')
                        station_file = os.path.join(dir, station_name)
                        if not os.path.exists(station_file):
                            raise IOError(
                                "File {file} not found. Weather data import aborted."
                                .format(file=station_file))

                        try:
                            existing = Weather_file.get(
                                (Weather_file.filename == station_name)
                                & (Weather_file.type == weather_type))
                        except Weather_file.DoesNotExist:
                            with open(station_file, "r") as station_data:
                                j = 0
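                                # line 3 of a station file holds nbyr, tstep, lat, lon, elev; line 4 is the first weather record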
                                for sline in station_data:
                                    if j == 2:
                                        station_info = sline.strip().split()
                                        if len(station_info) < 4:
                                            raise ValueError(
                                                "Invalid value at line {ln} of {file}. Expecting nbyr, tstep, lat, long, elev values separated by a space."
                                                .format(ln=str(j + 1),
                                                        file=station_file))

                                        lat = float(station_info[2])
                                        lon = float(station_info[3])

                                        file = {
                                            "filename": station_name,
                                            "type": weather_type,
                                            "lat": lat,
                                            "lon": lon
                                        }
                                        weather_files.append(file)
                                    elif j == 3:
                                        begin_data = sline.strip().split()
                                        if len(begin_data) < 3:
                                            raise ValueError(
                                                "Invalid value at line {ln} of {file}. Expecting year, julian day, and weather value separated by a space."
                                                .format(ln=str(j + 1),
                                                        file=station_file))

                                        date = datetime.datetime(
                                            int(begin_data[0]), 1, 1)
                                        current_start_date = date + datetime.timedelta(
                                            days=int(begin_data[1]) - 1)
                                        #if start_date is not None and current_start_date != start_date:
                                        #	raise ValueError("Start dates in weather files do not match. Make sure all weather files have the same starting and ending dates.")

                                        #start_date = current_start_date
                                        starts.append(current_start_date)
                                    elif j > 3:
                                        break

                                    j += 1

                                non_empty_lines = [
                                    sline for sline in station_data
                                    if sline.strip()
                                ]
                                last_line = non_empty_lines[-1].strip().split()
                                date = datetime.datetime(
                                    int(last_line[0]), 1, 1)
                                current_end_date = date + datetime.timedelta(
                                    days=int(last_line[1]) - 1)
                                #if end_date is not None and current_end_date != end_date:
                                #	raise ValueError("Ending dates in weather files do not match. Make sure all weather files have the same starting and ending dates.")

                                #end_date = current_end_date
                                ends.append(current_end_date)

                    i += 1

            db_lib.bulk_insert(project_base.db, Weather_file, weather_files)
            if len(starts) > 0 and len(ends) > 0:
                start_date = max(starts)
                end_date = min(ends)
        return start_date, end_date
Example #29
	def insert_lum():
		file = open(source_data_path + 'plants_landuse_rules.csv', "r")
		
		i = 1
		rules = {}
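		# build a lookup of per-plant land use defaults from the rules CSV, keyed by plant name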
		for line in file:
			if i > 1:
				val = line.split(',')
				n = val[0].lower().strip()
				lc = int(val[6])
				rules[n] = {
					'mgt': None,
					'cn2': val[3],
					'cons_prac': val[4],
					'ov_mann': val[5],
					'lc_status': lc == 1,  # use equality, not 'is', for int comparison
					'lai_init': float(val[7]),
					'bm_init': float(val[8]),
					'phu_init': float(val[9]),
					'plnt_pop': float(val[10]),
					'yrs_init': float(val[11]),
					'rsd_init': float(val[12])
				}
			
			i += 1
			
		plants = hru_parm_db.Plants_plt.select()
		
		plant_coms = []
		plant_com_items = []
		plant_com_id = 1
		for plt in plants:
			rule = rules[plt.name]
			
			plant_com = {
				'id': plant_com_id,
				'name': '{name}_comm'.format(name=plt.name),
				'rot_yr_ini': 1
			}
			plant_coms.append(plant_com)
			
			plant_com_item = {
				'plant_ini': plant_com_id,
				'plnt_name': plt.id,
				'lc_status': rule['lc_status'],
				'lai_init': rule['lai_init'],
				'bm_init': rule['bm_init'],
				'phu_init': rule['phu_init'],
				'plnt_pop': rule['plnt_pop'],
				'yrs_init': rule['yrs_init'],
				'rsd_init': rule['rsd_init']
			}
			plant_com_items.append(plant_com_item)
			plant_com_id += 1
		
		db_lib.bulk_insert(base.db, init.Plant_ini, plant_coms)
		db_lib.bulk_insert(base.db, init.Plant_ini_item, plant_com_items)
		
		lum_default_cal_group = None
		lum_default_mgt = None #lum.Management_sch.get(lum.Management_sch.name == 'no_mgt').id
		lum_default_cn2 = 5
		lum_default_cons_prac = 1
		lum_default_ov_mann = 2
		
		lums = []
		lum_dict = {}
		lum_id = 1
		for pcom in init.Plant_ini.select().order_by(init.Plant_ini.id):
			plant_name = pcom.name.strip().split('_comm')[0]
			rule = rules[plant_name]
			
			"""mgt_id = lum_default_mgt
			if val_exists(rule['mgt']):
				mgt = lum.Management_sch.get(lum.Management_sch.name == rule['mgt'])
				mgt_id = mgt.id"""
				
			cn2_id = lum_default_cn2
			if val_exists(rule['cn2']):
				cn2 = lum.Cntable_lum.get(lum.Cntable_lum.name == rule['cn2'])
				cn2_id = cn2.id
				
			cons_prac_id = lum_default_cons_prac
			if val_exists(rule['cons_prac']):
				cons_prac = lum.Cons_prac_lum.get(lum.Cons_prac_lum.name == rule['cons_prac'])
				cons_prac_id = cons_prac.id
				
			ov_mann_id = lum_default_ov_mann
			if val_exists(rule['ov_mann']):
				ov_mann = lum.Ovn_table_lum.get(lum.Ovn_table_lum.name == rule['ov_mann'])
				ov_mann_id = ov_mann.id
			
			l = {
				'id': lum_id,
				'name': '{name}_lum'.format(name=plant_name),
				'plnt_com': pcom.id,
				'mgt': None, #mgt_id,
				'cn2': cn2_id,
				'cons_prac': cons_prac_id,
				'ov_mann': ov_mann_id,
				'cal_group': lum_default_cal_group
			}
			lums.append(l)
			
			lum_dict[plant_name] = lum_id
			lum_id += 1
			
		db_lib.bulk_insert(base.db, lum.Landuse_lum, lums)
		
		urbans = hru_parm_db.Urban_urb.select()
		urb_lums = []
		for urb in urbans:
			l = {
				'id': lum_id,
				'name': '{name}_lum'.format(name=urb.name),
				'urban': urb.id,
				'urb_ro': 'buildup_washoff',
				'mgt': lum_default_mgt,
				'cn2': 49,
				'cons_prac': lum_default_cons_prac,
				'ov_mann': 18,
				'cal_group': lum_default_cal_group
			}
			urb_lums.append(l)
			
			lum_dict[urb.name] = lum_id
			lum_id += 1
			
		db_lib.bulk_insert(base.db, lum.Landuse_lum, urb_lums)
Example #30
    def read_default_table(self,
                           file_name,
                           name,
                           table,
                           db,
                           start_line,
                           ignore_id_col=True,
                           desc_key=''):
        file = open(file_name, 'r')
        read_units = desc_key not in ignore_units

        i = 1
        rows = []
        fields = table._meta.sorted_fields
        file_fields = []
        for line in file:
            if read_units and i == start_line - 2:
                for h in line.strip().split():
                    file_fields.append(h.strip())
            elif read_units and i == start_line - 1:
                units = line.strip().split()
                ui = units_start_column_index.get(desc_key,
                                                  default_units_column_index)
                reverse_index = desc_key in reversed_unit_lines
                col_descs = []
                for x in range(0, len(units)):
                    try:
                        column_name_val = file_fields[x] if reverse_index else file_fields[ui + x]
                        units_val = units[ui + x] if reverse_index else units[x]

                        col_desc = {
                            'table_name': name,
                            'column_name': column_name_val,
                            'units': units_val
                        }
                        col_descs.append(col_desc)
                    except IndexError:
                        pass
                db_lib.bulk_insert(db, base.Column_description, col_descs)
            elif i >= start_line:
                val = line.strip().split()

                row = {}
                j = 0
                for field in fields:
                    skip = False
                    if ignore_id_col and field.name == 'id':
                        skip = True

                    if not skip:
                        try:
                            row[field.name] = None if j >= len(val) or '*' in str(val[j]) else val[j]
                        except IndexError:
                            pass
                        j += 1
                rows.append(row)

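                # flush in batches of 1000 rows to keep memory use bounded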
                if len(rows) == 1000:
                    db_lib.bulk_insert(db, table, rows)
                    rows = []
            i += 1

        db_lib.bulk_insert(db, table, rows)
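
For reference, the db_lib.bulk_insert helper used throughout these examples is not shown here. A minimal sketch of what it might look like with peewee (the batch size and transaction handling are assumptions, not the project's actual implementation):

def bulk_insert(db, table, data, batch_size=100):
    # hypothetical sketch: insert a list of dicts into a peewee model in
    # fixed-size batches inside a single transaction
    if len(data) > 0:
        with db.atomic():
            for start in range(0, len(data), batch_size):
                table.insert_many(data[start:start + batch_size]).execute()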