Example No. 1
def cal_curl(year, month, product_n = 3):
	taux = D.get_data(year, month, 'taux', 1, product_n)
	tauy = D.get_data(year, month, 'tauy', 1, product_n)
	xgrid, ygrid, zgrid = D.get_grid_value('taux', product_n)
	xn = xgrid.size
	yn = ygrid.size
	curl = np.zeros([yn, xn])
	ns_dist = subroutine.dist_on_sphere([0.0, - 0.5], [0.0, 0.5])

	for j in range(0, yn):
		if j == yn - 1:
			curl[j, :] = np.nan
		else:
			y0 = j
			y1 = j + 1
			lat0 = ygrid[y0]
			lat1 = ygrid[y1]
			tmpdist = np.average(np.array([lat0, lat1]))
			ew_dist = subroutine.dist_on_sphere([0.0, tmpdist], [1.0, tmpdist])

			for i in range(0, xn):
				x0 = i
				lon0 = xgrid[x0]
				if i == xn - 1:
					x1 = 0
					lon1 = xgrid[x1]
				else:
					x1 = i + 1
					lon1 = xgrid[x1]

				taux00 = taux[y0, x0]
				tauy00 = tauy[y0, x0]
				taux01 = taux[y1, x0]
				tauy01 = tauy[y1, x0]
				taux10 = taux[y0, x1]
				tauy10 = tauy[y0, x1]
				taux11 = taux[y1, x1]
				tauy11 = tauy[y1, x1]
				a = Using_jit.average_of_2data(tauy10, tauy11)
				b = Using_jit.average_of_2data(tauy00, tauy01)
				c = Using_jit.average_of_2data(taux01, taux11)
				d = Using_jit.average_of_2data(taux00, taux10)
				if not np.isnan(a - b) and not np.isnan(c - d):
					curl[j, i] = (a - b) / ew_dist - (c - d) / ns_dist
				elif not np.isnan(a - b):
					curl[j, i] = (a - b) / ew_dist
				elif not np.isnan(c - d):
					curl[j, i] = - (c - d) / ns_dist
				else:
					curl[j, i] = np.nan

	return curl
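For reference, the stencil above is a centred finite-difference form of the vertical component of the wind-stress curl, with `ew_dist` and `ns_dist` the east-west and north-south grid distances on the sphere:

$$\mathrm{curl}\,\boldsymbol{\tau} = \frac{\partial \tau_y}{\partial x} - \frac{\partial \tau_x}{\partial y} \approx \frac{a - b}{d_{EW}} - \frac{c - d}{d_{NS}}$$

where $a, b$ are the eastern/western edge averages of `tauy` and $c, d$ the northern/southern edge averages of `taux`, exactly as computed in the loop.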
Example No. 2
	def save_Area_Trimmed_variable_as_npz(self, year, month, var, product_n):
		import Var
		import D
		id = Var.var_to_id(var)
		VV = Var.VAR[id]
		if VV.dim == '3D':
			data = D.get_data(year, month, var, 0, product_n)
		elif VV.dim == '2D':
			data = D.get_data(year, month, var, 1, product_n)
		else:
			raise ValueError('your var is not valid!')

		Data, _, _ = self.Get_data_of_area(data, var, product_n)
		self.save_Area_Trimmed_data_as_npz(Data, year, month, VV.dir_name, VV.formal_name, product_n)
Example No. 3
	def Get_Current_of_VerticalSection_from_UV(self, u, v, product_n):
		import D
		import numpy as np
		xgrid, ygrid, zgrid = D.get_grid_value('u', product_n)
		Ln = self.Pn
		if self.direction == 'EW':
			Data = np.zeros((self.band - 1, Ln - 1, zgrid.size))
			for i in range(0, Ln - 1):
				xn = np.where(xgrid == self.hgrid[i] + 0.5)[0][0]
				x = self.lonp[i + 1] - self.lonp[i]
				y = self.latp[i + 1] - self.latp[i]
				cos_alpha = x / np.sqrt(x ** 2 + y ** 2)
				sin_alpha = y / np.sqrt(x ** 2 + y ** 2)
				for j in range(0, self.band - 1):
					yn = np.where(ygrid == self.latp[i] + j + 0.5)[0][0]
					Data[j, i, :] = cos_alpha * u[yn, xn, :] + sin_alpha * v[yn, xn, :]

		elif self.direction == 'NS':
			Data = np.zeros((Ln - 1, self.band - 1, zgrid.size))
			for j in range(0, Ln - 1):
				yn = np.where(ygrid == self.hgrid[j] + 0.5)[0][0]
				x = self.lonp[j + 1] - self.lonp[j]
				y = self.latp[j + 1] - self.latp[j]
				cos_alpha = x / np.sqrt(x ** 2 + y ** 2)
				sin_alpha = y / np.sqrt(x ** 2 + y ** 2)
				for i in range(0, self.band - 1):
					xn = np.where(xgrid == self.lonp[j] + i + 0.5)[0][0]
					Data[j, i, :] = cos_alpha * u[yn, xn, :] + sin_alpha * v[yn, xn, :]

		else:
			raise ValueError("your direction is not valid!")

		return np.average(Data, axis = self.average_axis), self.hgrid[:self.Pn - 1] + 0.5, zgrid
Example No. 4
	def Get_VerticalSection_from_2Ddata(self, data, var, product_n):
		import D
		import numpy as np
		xgrid, ygrid, zgrid = D.get_grid_value(var, product_n)
		Ln = self.Pn
		if self.direction == 'EW':
			Data = np.zeros((self.band, Ln))
			for i in range(0, Ln):
				xn = np.where(xgrid == self.hgrid[i])[0][0]
				for j in range(0, self.band):
					yn = np.where(ygrid == self.latp[i] + j)[0][0]
					Data[j, i] = data[yn, xn]

		elif self.direction == 'NS':
			Data = np.zeros((Ln, self.band))
			for j in range(0, Ln):
				yn = np.where(ygrid == self.hgrid[j])[0][0]
				for i in range(0, self.band):
					xn = np.where(xgrid == self.lonp[j] + i)[0][0]
					Data[j, i] = data[yn, xn]

		else:
			raise ValueError("your direction is not valid!")

		return np.average(Data, axis = self.average_axis), self.hgrid
Example No. 5
def get_depth_of_minimum_of_vertical_gradient_from_data(data, var, product_n = 3):
	xgrid, ygrid, zgrid = D.get_grid_value(var, product_n)
	xn = xgrid.size
	yn = ygrid.size
	zn = zgrid.size

	Grad = np.zeros((yn, xn, zn))
	for k in range(1, zn - 1):
		dz = 0.5 * (zgrid[k + 1] - zgrid[k - 1])
		Grad[:, :, k] = (data[:, :, k + 1] - data[:, :, k - 1]) / dz

	Grad[np.isnan(Grad)] = np.inf
	GradMin = np.min(Grad, axis = 2)

	Depth_of_GradMin = np.zeros((yn, xn))
	for j in range(0, yn):
		for i in range(0, xn):
			if GradMin[j, i] == 0.0:
				Depth_of_GradMin[j, i] = np.nan
			else:
				K1 = np.where(Grad[j, i, :] == GradMin[j, i])[0][0]
				Depth_of_GradMin[j, i] = zgrid[K1]

	Depth_of_GradMin[np.where(np.abs(Depth_of_GradMin) == np.inf)] = np.nan
	Depth_of_GradMin[np.where(Depth_of_GradMin == 0.0)] = np.nan

	return Depth_of_GradMin
Example No. 6
def get_depth_of_maximum_of_vertical_gradient_from_profile(profile, product_n = 3):
	_, _, zgrid = D.get_grid_value('s', product_n)
	zn = zgrid.size
	Grad = np.zeros(zn)
	for k in range(1, zn - 1):
		dz = 0.5 * (zgrid[k + 1] - zgrid[k - 1])
		Grad[k] = (profile[k + 1] - profile[k - 1]) / dz

	Grad[np.isnan(Grad)] = -np.inf
	GradMax = np.max(Grad)
	Grad[np.where(Grad == -np.inf)] = np.inf
	GradMin = np.min(Grad)

	if GradMax == 0.0 and GradMin == 0.0:
		Depth_of_GradMax = np.nan
	else:
		K1 = np.where(Grad == GradMax)[0][0]
		K2 = np.where(Grad == GradMin)[0][0]
		if GradMax >= abs(GradMin):
			Depth_of_GradMax = zgrid[K1]
		else:
			Depth_of_GradMax = zgrid[K2]


	return Depth_of_GradMax
Example No. 7
def cal_Ue_or_Ve(year, month, product_n, Ue_or_Ve, nu):
	taux = D.get_data(year, month, 'taux', 1, product_n)
	tauy = D.get_data(year, month, 'tauy', 1, product_n)
	xgrid, ygrid, zgrid = D.get_grid_value('taux', product_n)

	if Ue_or_Ve == 'Ue':
		Ue, _ = cal_EkmanCurrent_from_tau(taux, tauy, ygrid, nu)
	elif Ue_or_Ve == 'Ve':
		_, Ue = cal_EkmanCurrent_from_tau(taux, tauy, ygrid, nu)
	else:
		raise ValueError('your Ue_or_Ve argument is not valid!')

	Ue = convert.convert_Sgrid_value_to_UVgrid_value_3D(Ue)
	a = np.zeros((ygrid.size, xgrid.size, zgrid.size - M)) * np.nan # pad with NaN layers so Ue has as many vertical levels as u and v (M is a constant defined elsewhere in the module)
	Ue = np.c_[Ue, a]
	return Ue
Example No. 8
	def Get_VerticalSection_from_data(self, data, var, product_n):
		import D
		import numpy as np
		import convert
		xgrid, ygrid, zgrid = D.get_grid_value('t', product_n)
		if var == 'u' or var == 'v':
			data = convert.convert_UVgrid_value_to_Sgrid_value_3D(data)

		Ln = self.Pn
		if self.direction == 'EW':
			Data = np.zeros((self.band, Ln, zgrid.size))
			for i in range(0, Ln):
				xn = np.where(xgrid == self.hgrid[i])[0][0]
				for j in range(0, self.band):
					yn = np.where(ygrid == self.latp[i] + j)[0][0]
					Data[j, i, :] = data[yn, xn, :]

		elif self.direction == 'NS':
			Data = np.zeros((Ln, self.band, zgrid.size))
			for j in range(0, Ln):
				yn = np.where(ygrid == self.hgrid[j])[0][0]
				for i in range(0, self.band):
					xn = np.where(xgrid == self.lonp[j] + i)[0][0]
					Data[j, i, :] = data[yn, xn, :]

		else:
			raise ValueError("your direction is not valid!")

		return np.average(Data, axis = self.average_axis), self.hgrid, zgrid
Example No. 9
	def draw_line_with_data_map(self, plt, year, month, var, depth, cb_min, cb_max, product_n = 3, xlim = [40, 110], ylim = [ - 20, 30], \
								fsizex = 10, fsizey = 6, div = 20.0, interval = 10, color = 'red', linetype = '--'):
		import D
		import Var
		import quick
		import subroutine
		xgrid, ygrid, zgrid = D.get_grid_value(var, product_n)
		data = D.get_data(year, month, var, depth, product_n)
		stryear, strmonth = subroutine.strym(year, month)
		vid = Var.var_to_id(var)
		formal_name = Var.VAR[vid].Get_formal_name()
		title_name = D.Data[product_n].title_name
		clabel = stryear + '/' + strmonth + ' ' + formal_name + ' at ' + str(zgrid[depth - 1]) + 'm ' + title_name
		plta = quick.draw_with_axis_and_map(data, ygrid, xgrid, cb_min, cb_max, xlim = xlim, ylim = ylim, \
											interval = interval, clabel = clabel, fsizex = fsizex, fsizey = fsizey)
		plta = self.draw_line(plta, xlim, ylim, interval = interval, color = color, linetype = linetype)
		return plta
Example No. 10
def test2():
    N = 5
    W = 5
    w = [1, 1, 1, 1, 1]
    v = [1e9, 1e9, 1e9, 1e9, 1e9]
    vsum = D.calc_vsum(N, W, w, v)
    print(vsum)
    assert max(vsum[N, :]) == 5e9
Example No. 11
def test3():
    N = 6
    W = 15
    w = [6, 5, 6, 6, 3, 7]
    v = [5, 6, 4, 6, 5, 2]
    vsum = D.calc_vsum(N, W, w, v)
    print(vsum)
    assert max(vsum[N, :]) == 17
Example No. 12
def test1():
    N = 3
    W = 8
    w = [3, 4, 5]
    v = [30, 50, 60]
    vsum = D.calc_vsum(N, W, w, v)
    print(vsum)
    assert max(vsum[N, :]) == 90
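The three tests above all exercise `D.calc_vsum(N, W, w, v)`, which evidently returns the full (N + 1) × (W + 1) dynamic-programming table of a 0/1 knapsack, where row `i` holds the best value achievable with the first `i` items at each capacity. The `D` module itself is not shown, so the following is only a minimal sketch consistent with those assertions:

```python
import numpy as np

def calc_vsum(N, W, w, v):
    # vsum[i, j]: best total value using the first i items within capacity j
    vsum = np.zeros((N + 1, W + 1))
    for i in range(1, N + 1):
        for j in range(W + 1):
            vsum[i, j] = vsum[i - 1, j]  # skip item i
            if j >= w[i - 1]:  # take item i if it fits
                vsum[i, j] = max(vsum[i, j], vsum[i - 1, j - w[i - 1]] + v[i - 1])
    return vsum
```

With this definition, `max(vsum[N, :])` is the optimum over all capacities up to `W`, which matches the expected values 5e9, 17 and 90 in the tests.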
Example No. 13
def get_Gradient_of_variable_from_data(data, var, product_n = 3):
	xgrid, ygrid, zgrid = D.get_grid_value(var, product_n)
	xn = xgrid.size
	yn = ygrid.size
	zn = zgrid.size

	Data_ns = np.zeros((yn, xn, zn))
	Data_ew = np.zeros((yn, xn, zn))
	ns_dist = subroutine.dist_on_sphere([0.0, - 0.5], [0.0, 0.5])

	for j in range(0, yn):
		if j == 0 or j == yn - 1:
			Data_ns[j, :, :] = np.nan
			Data_ew[j, :, :] = np.nan
		else:
			y0 = j - 1
			y1 = j
			y2 = j + 1
			lat1 = ygrid[y1]
			ew_dist = subroutine.dist_on_sphere([0.0, lat1], [1.0, lat1])

			for i in range(0, xn):
				x1 = i
				lon1 = xgrid[x1]
				if i == 0:
					x0 = xn - 1	# wrap west across the dateline (longitude is periodic)
					x2 = i + 1
				elif i == xn - 1:
					x0 = i - 1
					x2 = 0		# wrap east across the dateline
				else:
					x0 = i - 1
					x2 = i + 1

				data01 = data[y1, x0, :]
				data10 = data[y0, x1, :]
				data11 = data[y1, x1, :]
				data21 = data[y1, x2, :]
				data12 = data[y2, x1, :]

				if np.isnan(data01[0]) and not np.isnan(data21[0]):
					Data_ew[j, i, :] = (data21 - data11) / ew_dist
				elif np.isnan(data21[0]) and not np.isnan(data01[0]):
					Data_ew[j, i, :] = (data11 - data01) / ew_dist
				else:
					Data_ew[j, i, :] = (data21 - data01) / (2.0 * ew_dist)

				if np.isnan(data10[0]) and not np.isnan(data12[0]):
					Data_ns[j, i, :] = (data12 - data11) / ns_dist
				elif np.isnan(data12[0]) and not np.isnan(data10[0]):
					Data_ns[j, i, :] = (data11 - data10) / ns_dist
				else:
					Data_ns[j, i, :] = (data12 - data10) / (2.0 * ns_dist)

	return Data_ew, Data_ns
Example No. 14
def get_UgVg_from_RawSSH(year, month, product_n = 3):
	ht = get_smoothed_ssh(year, month, product_n)
	Ug, Vg, xgrid, ygrid = cal_GeostrophicCurrent_from_SSH(ht)

	# treat values outside the region of interest as missing
	u = D.get_data(year, month, 'u', 1, product_n)
	Ug[np.isnan(u)] = np.nan
	Vg[np.isnan(u)] = np.nan

	return Ug, Vg, xgrid, ygrid
Example No. 15
	def save_data_as_npz(self, Timeseries, fy, ly, var, depth = 1, product_n = 3):
		import D
		import Var
		import subroutine
		vid = Var.var_to_id(var)
		formal_name = Var.VAR[vid].Get_formal_name()
		title_name = D.Data[product_n].title_name
		xgrid, ygrid, zgrid = D.get_grid_value(var, product_n)
		subroutine.save_npz(Timeseries, self.AreaName + '_Area-Averaged-' + \
							formal_name + '_at_' + str(zgrid[depth - 1]) + \
							'm_Year' + str(fy) + '-' + str(ly) + '_' + title_name)
Example No. 16
def product_grid_info(var, data_or_size, product_n = 3):		# fetch grid information for this product
	import D
	if not isinstance(product_n, int):						# raise an error when product_n is not an integer
		raise Exception('product_n is not integer!')

	xgrid, ygrid, zgrid = D.get_grid_value(var, product_n)
	if data_or_size == 'data':
		return xgrid, ygrid, zgrid
	elif data_or_size == 'size':
		return xgrid.size, ygrid.size, zgrid.size
	else:
		raise Exception('error! your character is not valid!')
Example No. 17
def cal_GeostrophicCurrent_from_SSH(ssh, product_n = 3):
	ns_dist = subroutine.dist_on_sphere([0.0, - 0.5], [0.0, 0.5])
	xgrid, ygrid, zgrid = D.get_grid_value('ht', product_n)
	yn = ygrid.size
	xn = xgrid.size
	Ug = np.zeros((yn, xn))
	Vg = np.zeros((yn, xn))
	xgrid_new = np.zeros(xn)
	ygrid_new = np.zeros(yn)

	for j in range(0, yn):
		if j == yn - 1:
			ygrid_new[j] = ygrid[j] + 0.5
			Ug[j, :] = np.nan
			Vg[j, :] = np.nan
		else:
			y0 = j
			y1 = j + 1
			ygrid_new[j] = 0.5 * (ygrid[y0] + ygrid[y1])

			if abs(ygrid_new[j]) <= 2.0: # skip the geostrophic calculation near the equator, where f is too small
				Ug[j, :] = np.nan
				Vg[j, :] = np.nan
			else:
				ew_dist = subroutine.dist_on_sphere([0.0, ygrid_new[j]], [1.0, ygrid_new[j]])
				f = subroutine.f0(ygrid_new[j])

				for i in range(0, xn):
					x0 = i
					if i == xn - 1:
						x1 = 0
						lon = xgrid[i] + 0.5
					else:
						x1 = i + 1
						lon = 0.5 * (xgrid[x0] + xgrid[x1])

					if j == 1:	# fill xgrid_new once, on the first interior row
						xgrid_new[i] = lon

					ssh00 = ssh[y0, x0]
					ssh01 = ssh[y1, x0]
					ssh10 = ssh[y0, x1]
					ssh11 = ssh[y1, x1]
					a = Using_jit.average_of_2data(ssh01, ssh11)
					b = Using_jit.average_of_2data(ssh00, ssh10)
					c = Using_jit.average_of_2data(ssh10, ssh11)
					d = Using_jit.average_of_2data(ssh00, ssh01)

					Ug[j, i] = -g / f * (a - b) / ns_dist
					Vg[j, i] = g / f * (c - d) / ew_dist


	return Ug, Vg, xgrid_new, ygrid_new
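In other words, the loop above discretises the standard geostrophic balance, with $g$ the gravitational acceleration and $f$ the Coriolis parameter (both supplied by the surrounding module) and $\eta$ the sea-surface height:

$$U_g = -\frac{g}{f}\frac{\partial \eta}{\partial y}, \qquad V_g = \frac{g}{f}\frac{\partial \eta}{\partial x}$$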
Example No. 18
def get_smoothed_ssh(year, month, product_n = 3):
	ssh = D.get_data(year, month, 'ht', 1, product_n)
	xgrid, ygrid, zgrid = D.get_grid_value('ht', product_n)
	xn = xgrid.size
	yn = ygrid.size
	ssh_new = np.zeros((yn, xn))

	for j in range(0, yn):
		if j == 0 or j == yn - 1:
			ssh_new[j, :] = np.nan
		else:
			y0 = j - 1
			y1 = j
			y2 = j + 1

			for i in range(0, xn):
				x1 = i
				lon1 = xgrid[x1]
				if i == 0:
					x0 = xn - 1	# wrap west across the dateline (longitude is periodic)
					x2 = i + 1
				elif i == xn - 1:
					x0 = i - 1
					x2 = 0		# wrap east across the dateline
				else:
					x0 = i - 1
					x2 = i + 1

				ssh01 = ssh[y1, x0]
				ssh10 = ssh[y0, x1]
				ssh11 = ssh[y1, x1]
				ssh21 = ssh[y1, x2]
				ssh12 = ssh[y2, x1]
				if np.isnan(ssh11):
					ssh_new[j, i] = np.nan
				else:
					ssh_around = np.array([ssh01, ssh10, ssh21, ssh12])
					ssh_new[j, i] = 0.5 * (ssh11 + np.average(ssh_around[~np.isnan(ssh_around)]))

	return ssh_new / 100.0
Example No. 19
	def load_data_of_npz(self, fy, ly, var, depth = 1, product_n = 3):
		import D
		import Var
		import subroutine
		vid = Var.var_to_id(var)
		formal_name = Var.VAR[vid].Get_formal_name()
		title_name = D.Data[product_n].title_name
		xgrid, ygrid, zgrid = D.get_grid_value(var, product_n)
		Timeseries = subroutine.load_npz(self.AreaName + '_Area-Averaged-' + \
										 formal_name + '_at_' + str(zgrid[depth - 1]) + \
										 'm_Year' + str(fy) + '-' + str(ly) + '_' + title_name)
		months, label = subroutine.get_months_and_label(fy, ly)
		return Timeseries, months, label
Example No. 20
	def cal_Mass_Budget(self, year, month, depth = 10, product_n = 3):
		# Compute the volume budget of this region.
		# Caution: assumes the grid spacing is the same along x, y and z.
		# Also assumes the region is not meridionally elongated, so EWdist may be used
		# for the cross-sectional area of both the northern and southern faces.
		import D
		import D2
		import numpy as np
		u = D.get_data(year, month, 'u', 0, product_n)[:, :, :depth]
		v = D.get_data(year, month, 'v', 0, product_n)[:, :, :depth]
		w = D.get_data(year, month, 'w', depth, product_n)
		ssh = D2.D2Data[33].load_data_of_npz(year, month, product_n)
		ssh_Sec = self.Get_data_of_AreaSection_from_data(ssh, 'ht', product_n)
		_, _, zgrid = D.get_grid_value('w', product_n)
		u = self.Get_data_of_AreaSection_from_data(u, 'u', product_n)
		v = self.Get_data_of_AreaSection_from_data(v, 'v', product_n)
		w, _, _ = self.Get_data_of_area(w, 'w', product_n)

		# raise each section's height by the sea-surface dynamic height
		West = np.average(u.West) * (zgrid[depth - 1] + np.average(ssh_Sec.West)) * self.NSdist
		East = np.average(u.East) * (zgrid[depth - 1] + np.average(ssh_Sec.East)) * self.NSdist
		North = np.average(v.North) * (zgrid[depth - 1] + np.average(ssh_Sec.North)) * self.EWdist
		South = np.average(v.South) * (zgrid[depth - 1] + np.average(ssh_Sec.South)) * self.EWdist
		Bottom = self.Square * np.average(w)
		return South, North, West, East, Bottom
Example No. 21
	def timeseries_of_area_averaged_value(self, fy, ly, var, depth, product_n = 3):
		import numpy as np
		import D
		import subroutine
		Yn = ly - fy + 1
		Timeseries = np.zeros(12 * Yn)
		months, label = subroutine.get_months_and_label(fy, ly)
		for year in range(fy, ly + 1):
			for month in range(1, 13):
				i = month - 1 + (year - fy) * 12
				Data = D.get_data(year, month, var, depth, product_n)
				tmp, _, _ = self.Get_data_of_area(Data, var, product_n)
				Timeseries[i] = np.average(tmp[~np.isnan(tmp)])

		return Timeseries, months, label
Example No. 22
def calc_d():
    try:
        global d
        d = D.calc_d(a, b, c, un)

        if d == set():
            l4["text"] = '{}'
        else:
            l4["text"] = str(d).replace('{', '').replace('}',
                                                         '').replace("'", '')

    except NameError:
        messagebox.showinfo("Error", "Check that you entered the sets!")
    except TypeError:
        messagebox.showinfo("Error", "Enter correct sets!")
Example No. 23
def cal_Salt_Transport(s, u, Zonal_or_Meridional, product_n):
	# Computes either zonal or meridional salt transport.
	# Zonal : Zonal_or_Meridional = 0
	# Meridional : Zonal_or_Meridional = 1
	rho0 = 1024.0				# density held constant for simplicity
	yn = s.shape[0]
	xn = s.shape[1]
	zn = s.shape[2]
	ns_dist = subroutine.dist_on_sphere([0.0, - 0.5], [0.0, 0.5])
	xgrid, ygrid, zgrid = D.get_grid_value('s', product_n)
	Data = np.zeros((yn, xn, zn))

	for j in range(0, yn):
		if j == 0:
			Data[j, :, :] = np.nan
		else:
			y0 = j - 1
			y1 = j
			lat1 = ygrid[y1]

			if Zonal_or_Meridional == 0:
				L = ns_dist		# for zonal transport, integrate over the meridional cell width
			elif Zonal_or_Meridional == 1:
				L = subroutine.dist_on_sphere([0.0, lat1], [1.0, lat1]) # for meridional transport, integrate over the zonal cell width
			else:
				raise ValueError('your Zonal_or_Meridional argument is not valid!')

			for i in range(0, xn):
				x1 = i
				if i == 0:
					x0 = xn - 1
				else:
					x0 = i - 1

				if np.isnan(s[y1, x1, 0]):
					Data[y1, x1, :] = np.nan
				else:
					for k in range(0, zn):
						# U = np.array([u[y0, x0, k], u[y1, x0, k], u[y0, x1, k], u[y1, x1, k]])
						# Data[y1, x1, k] = L * lz[k] * 1e-3 * rho0 * s[y1, x1, k] * \
						# 				  np.average(U[np.where(np.isnan(U) == False)])
						Data[y1, x1, k] = L * lz[k] * 1e-3 * rho0 * s[y1, x1, k] * \
										  average_of_4data(u[y0, x0, k], u[y1, x0, k], u[y0, x1, k], u[y1, x1, k])

	return Data
Example No. 24
def get_data_for_draw_12month_cycle(depthn = 1, product_n = 3, Ekman_or_Geost = 'Ekman', Vector_or_Scalar = 'Scalar'):
	xgrid, ygrid, zgrid = D.get_grid_value('u', product_n)
	if Ekman_or_Geost == 'Ekman':
		fname = 'Ekman'
	elif Ekman_or_Geost == 'Geost':
		fname = 'Geostrophic'
	else:
		raise ValueError("your Ekman_or_Geost argument is not vald!")

	if Vector_or_Scalar == 'Scalar':
		DataDrawN = 0
		Data = np.zeros((12, ygrid.size, xgrid.size))
	elif Vector_or_Scalar == 'Vector':
		DataDrawN = 1
		Data = np.zeros((12, 2, ygrid.size, xgrid.size))
	else:
		raise ValueError("your Vector_or_Scalar argument is not vald!")


	fy = 1990
	ly = 2011


	for i in range(0, 12):
		month = i + 1
		print('month =', month)
		# fetch the data
		each_u = np.zeros((ygrid.size, xgrid.size, ly - fy + 1))
		each_v = np.zeros((ygrid.size, xgrid.size, ly - fy + 1))
		for year in range(fy, ly + 1):
			k = year - fy
			tmpu, tmpv = load_data_of_npz(year, month, product_n = product_n, Ekman_or_Geost = Ekman_or_Geost)
			each_u[:, :, k] = tmpu[:, :, depthn - 1]
			each_v[:, :, k] = tmpv[:, :, depthn - 1]

		if Vector_or_Scalar == 'Scalar':
			Data[i, :, :] = np.sqrt(np.average(each_u, axis = 2)**2 + np.average(each_v, axis = 2)**2)
		elif Vector_or_Scalar == 'Vector':
			Data_d1 = np.average(each_u, axis = 2)
			Data_d2 = np.average(each_v, axis = 2)
			Data[i, :, :, :] = np.array([Data_d1, Data_d2])
		else:
			raise ValueError("your Vector_or_Scalar argument is not vald!")

	return Data
Example No. 25
	def save_data_as_npz(self, Timeseries, fy, ly, var = 's', depthn = 1, product_n = 3, Rawdata_or_Anomaly = 'Rawdata', fy_of_Anomalydata = 1990, ly_of_Anomalydata = 2011):
		import D
		import subroutine
		import Var
		title_name = D.Data[product_n].title_name
		xgrid, ygrid, zgrid = D.get_grid_value('s', product_n)
		vid = Var.var_to_id(var)
		formal_name = Var.VAR[vid].Get_formal_name()

		if Rawdata_or_Anomaly == 'Rawdata':
			fname_RA = formal_name
		elif Rawdata_or_Anomaly == 'Anomaly':
			fname_RA = '[Anomaly_of_' + formal_name + str(fy_of_Anomalydata) + '-' + str(ly_of_Anomalydata) + ']'
		else:
			raise ValueError('your Rawdata_or_Anomaly argument is not valid!')

		subroutine.save_npz(Timeseries, self.AreaName + '_of_' + fname_RA + '_at_' + str(zgrid[depthn - 1]) + \
							'm_Year' + str(fy) + '-' + str(ly) + '_' + title_name)
Example No. 26
	def Get_data_of_area(self, data, var, product_n):
		# This works for trimming 3-D data as well as 2-D data.
		import D
		import numpy as np
		import convert
		xgrid, ygrid, zgrid = D.get_grid_value('t', product_n)
		if var == 'u' or var == 'v':
			if data.ndim == 3:
				data = convert.convert_UVgrid_value_to_Sgrid_value_3D(data)
			elif data.ndim == 2:
				data = convert.convert_UVgrid_value_to_Sgrid_value_2D(data)

		mx = np.where(xgrid == self.wlon)[0][0]
		nx = np.where(xgrid == self.elon)[0][0] + 1
		my = np.where(ygrid == self.slat)[0][0]
		ny = np.where(ygrid == self.nlat)[0][0] + 1
		data = data[my:ny, mx:nx]
		xgrid = xgrid[mx:nx]
		ygrid = ygrid[my:ny]
		return data, xgrid, ygrid
Example No. 27
def get_depth_of_minimum_of_vertical_gradient_from_profile(profile, product_n = 3):
	# Find the depth at which the vertical gradient of the profile is most negative.
	# For salinity, this is intended for the Arabian Sea.
	_, _, zgrid = D.get_grid_value('s', product_n)
	zn = zgrid.size
	Grad = np.zeros(zn)
	for k in range(1, zn - 1):
		dz = 0.5 * (zgrid[k + 1] - zgrid[k - 1])
		Grad[k] = (profile[k + 1] - profile[k - 1]) / dz

	Grad[np.isnan(Grad)] = np.inf
	GradMin = np.min(Grad)

	if GradMin == 0.0:
		Depth_of_GradMin = np.nan
	else:
		K1 = np.where(Grad == GradMin)[0][0]
		Depth_of_GradMin = zgrid[K1]

	return Depth_of_GradMin
Example No. 28
def cal_UVg_from_UV_and_UVe(year, month, product_n, U_or_V_and_nu):
	if U_or_V_and_nu == 'U_5e-3':
		D2id = 23
		var = 'u'
	elif U_or_V_and_nu == 'V_5e-3':
		D2id = 24
		var = 'v'
	elif U_or_V_and_nu == 'U_1e-2':
		D2id = 29
		var = 'u'
	elif U_or_V_and_nu == 'V_1e-2':
		D2id = 30
		var = 'v'
	else:
		raise ValueError('your U_or_V_and_nu argument is not valid!')

	D2D = D2.D2Data[D2id]
	Ue = D2D.load_data_of_npz(year, month, product_n)
	U = D.get_data(year, month, var, 0, product_n)
	Ug = U - Ue
	return Ug
Example No. 29
	def make_Ave_or_Std_of_Data(self, fy, ly, product_n, Ave_or_Std = 'Ave'):
		import D
		import numpy as np
		title_name = D.Data[product_n].title_name
		xgrid, ygrid, zgrid = D.get_grid_value('t', product_n) # 't' or 'u' both work here
		if self.dimension == 2:
			AvSt_of_Data = np.zeros((ygrid.size, xgrid.size, 12))
			for month in range(1, 13):
				each_Data = np.zeros((ygrid.size, xgrid.size, ly - fy + 1))
				for year in range(fy, ly + 1):
					k = year - fy
					each_Data[:, :, k] = self.load_data_of_npz(year, month, product_n)

				if Ave_or_Std == 'Ave':
					AvSt_of_Data[:, :, month - 1] = np.average(each_Data, axis = 2)
				elif Ave_or_Std == 'Std':
					AvSt_of_Data[:, :, month - 1] = np.std(each_Data, axis = 2)
				else:
					raise ValueError('your Ave_or_Std argument is not valid!')

		elif self.dimension == 3:
			AvSt_of_Data = np.zeros((ygrid.size, xgrid.size, zgrid.size, 12))
			for month in range(1, 13):
				each_Data = np.zeros((ygrid.size, xgrid.size, zgrid.size, ly - fy + 1))
				for year in range(fy, ly + 1):
					k = year - fy
					each_Data[:, :, :, k] = self.load_data_of_npz(year, month, product_n)

				if Ave_or_Std == 'Ave':
					AvSt_of_Data[:, :, :, month - 1] = np.average(each_Data, axis = 3)	# average over the year axis (axis 3), not the depth axis
				elif Ave_or_Std == 'Std':
					AvSt_of_Data[:, :, :, month - 1] = np.std(each_Data, axis = 3)
				else:
					raise ValueError('your Ave_or_Std argument is not valid!')

		else:
			raise ValueError('your dimension is not valid!')


		return AvSt_of_Data
Example No. 30
	def cal_Salinity_Transport_Budget(self, year, month, depth = 10, product_n = 3):
		# Compute the salinity transport budget of this region.
		# Caution: assumes the grid spacing is the same along x, y and z.
		# Also assumes the region is not meridionally elongated, so EWdist may be used
		# for the cross-sectional area of both the northern and southern faces.
		import D
		import D2
		import Using_jit
		import numpy as np
		import Budget_at_Global_Ocean
		rho0 = 1024.0
		dS = Budget_at_Global_Ocean.cal_dSdt(year, month, product_n)[:, :, :depth]
		sff = D.get_data(year, month, 'sff', 1, product_n)
		s = D.get_data(year, month, 's', 0, product_n)[:, :, :depth]
		u = D.get_data(year, month, 'u', 0, product_n)[:, :, :depth]
		v = D.get_data(year, month, 'v', 0, product_n)[:, :, :depth]
		ssh = D2.D2Data[33].load_data_of_npz(year, month, product_n)
		ssh_Sec = self.Get_data_of_AreaSection_from_data(ssh, 'ht', product_n)
		ssh_Area, _, _ = self.Get_data_of_area(ssh, 'ht', product_n)
		s_bottom = s[:, :, depth - 1]
		s_surface = s[:, :, 0]
		w = D.get_data(year, month, 'w', depth, product_n)
		_, _, zgrid = D.get_grid_value('w', product_n)

		ZonTsp = Using_jit.cal_Salt_Transport(s, u, 0, product_n)
		MerTsp = Using_jit.cal_Salt_Transport(s, v, 1, product_n)
		ZonTsp = self.Get_data_of_AreaSection_from_data(ZonTsp, 's', product_n)
		MerTsp = self.Get_data_of_AreaSection_from_data(MerTsp, 's', product_n)

		# raise each section's height by the sea-surface dynamic height
		West = np.sum(ZonTsp.West) * (zgrid[depth - 1] + np.average(ssh_Sec.West)) / zgrid[depth - 1]
		East = np.sum(ZonTsp.East) * (zgrid[depth - 1] + np.average(ssh_Sec.East)) / zgrid[depth - 1]
		North = np.sum(MerTsp.North) * (zgrid[depth - 1] + np.average(ssh_Sec.North)) / zgrid[depth - 1]
		South = np.sum(MerTsp.South) * (zgrid[depth - 1] + np.average(ssh_Sec.South)) / zgrid[depth - 1]

		w, _, _ = self.Get_data_of_area(w, 'w', product_n)
		s_bottom, _, _ = self.Get_data_of_area(s_bottom, 's', product_n)
		Bottom = self.Square * np.average(1e-3 * rho0 * w * s_bottom)

		sff, _, _ = self.Get_data_of_area(sff, 'sff', product_n)
		s_surface, _, _ = self.Get_data_of_area(s_surface, 's', product_n)
		Surface = self.Square * np.average(1e-3 * rho0 * sff * s_surface) / (60 * 60 * 24 * 30.0)

		# raise the water-column height by the sea-surface dynamic height
		dS, _, _ = self.Get_data_of_area(dS, 's', product_n)
		Change = 1e-3 * rho0 * np.average(dS) * (zgrid[depth - 1] + np.average(ssh_Area)) * self.Square / (60 * 60 * 24 * 30.0)

		return South, North, West, East, Bottom, Surface, Change
Example No. 31
def runTests():
    fileList = getInputFiles()
    listoflists = []
    no_test_case = 1
    for file_name in fileList:
        sublist = [no_test_case]
        if (no_test_case - 1) // 5 == 0:
            sublist.append("easy")
        elif (no_test_case - 1) // 5 == 1:
            sublist.append("moderate")
        else:
            sublist.append("hard")

        puzzle = extract_puzzle(file_name)

        # run each solver implementation on the same puzzle and record its time and node count
        for algo in (algoA, algoB, algoC, algoD, algoE):
            solver = algo.Sudoku(puzzle)
            solver.solve()
            sublist.extend([solver.time, solver.count])

        listoflists.append(sublist)
        no_test_case += 1
    return listoflists
Example No. 32
	def Get_data_of_AreaSection_from_data(self, data, var, product_n):
		# For example, if wlon = 10.5 and elon = 19.5,
		# Data_W holds the data at longitude 10.0 and Data_E the data at longitude 20.0.
		import numpy as np
		import D
		import convert
		xgrid, ygrid, zgrid = D.get_grid_value('t', product_n)
		if var == 'u' or var == 'v':
			if data.ndim == 3:
				data = convert.convert_UVgrid_value_to_Sgrid_value_3D(data)
			elif data.ndim == 2:
				data = convert.convert_UVgrid_value_to_Sgrid_value_2D(data)

		mx = np.where(xgrid == self.wlon)[0][0]
		nx = np.where(xgrid == self.elon)[0][0] + 1
		my = np.where(ygrid == self.slat)[0][0]
		ny = np.where(ygrid == self.nlat)[0][0] + 1
		Data_S = 0.5 * (data[my - 1, mx:nx] + data[my, mx:nx])
		Data_N = 0.5 * (data[ny - 1, mx:nx] + data[ny, mx:nx])
		Data_W = 0.5 * (data[my:ny, mx - 1] + data[my:ny, mx])
		Data_E = 0.5 * (data[my:ny, nx - 1] + data[my:ny, nx])
		Data = EACH_SECTION(Data_S, Data_N, Data_W, Data_E)
		return Data
Example No. 33
def exact_solver(img, alpha, mu0, rho):
    size = np.shape(img)
    iterations = 50

    Z = np.zeros((2 * size[0], size[1]))
    G = Z
    k = 0
    mu = mu0

    W = make_weight_matrix.make_weight_matrix(img, 5)

    while k < iterations:
        U = Z / mu
        A = alpha * W / mu
        T = T_solver.T_solver(img, mu, G, U)
        delT = D.D(T)
        G = G_solver.G_solver(A, (delT + U))
        B = delT - G
        Z = mu * (B + U)
        mu = mu * rho
        k = k + 1

    return T
Example No. 34
def make_weight_matrix(img, ker_size):
    size = np.shape(img)
    p = size[0] * size[1]

    D_img = D.D(img)
    D_img_vec = np.reshape(D_img, (2 * p, 1), order='F')

    dtx = D_img_vec[0:p]
    dty = D_img_vec[p:2 * p]

    w_gauss = gaussian_filter.gaussian_filter((ker_size, 1), 2)
    w_gauss = np.reshape(w_gauss, w_gauss.size, order='F')
    dtx = np.reshape(dtx, dtx.size, order='F')
    dty = np.reshape(dty, dty.size, order='F')
    convl_x = conv.conv(dtx, w_gauss)
    convl_y = conv.conv(dty, w_gauss)

    w_x = 1.0 / (np.absolute(convl_x) + 0.0001)
    w_y = 1.0 / (np.absolute(convl_y) + 0.0001)

    W_vec = np.concatenate((w_x, w_y))
    W = np.reshape(W_vec, (2 * size[0], size[1]), order='F')

    return W
Example No. 35
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 16:58:24 2018

@author: omf
"""
import pandas as pd
import D

his = D.history_datas(instruments, start_date, end_date, fields)
fea = D.features(instruments, start_date, end_date, features)
data = pd.merge(his, fea, on=['date', 'instrument'], how='inner')
df = data.set_index('date', drop=True)  # index the merged frame by date


class conf:
    pass


def deal_instrument(df):
    '''
    Build the complete dataset for a single stock.
    '''
    result = []

    # groupby yields (name, group) pairs; unpack the group frame as d
    for i, (name, d) in enumerate(df.groupby(by='instrument')):
        d = d.copy()  # avoid mutating the grouped view
        d['return'] = 100 * (d['close'].shift(-5) / d['open'].shift(-1) - 1)
        result.append(d)  # assumed completion: the original snippet is truncated here
    return result
Example No. 36
data_transform = transforms.Compose([
    transforms.ToTensor(),  # convert to a [C, H, W] torch.FloatTensor with values in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])  # normalize each channel: Normalized_image = (image - mean) / std
])

# load the model
print('load model begin!')
model = torch.load(
    '/home/momo/sun.zheng/Recognizing_Image_Style/model_F_l_0.01_SGD_epoch_8.pkl'
)
model.eval()
model = model.to(device)
print('load model done!')

# load the test data from the dataset
test_dataset = D.ImageFolder(
    root='/home/momo/data2/sun.zheng/flickr_style/test',
    transform=data_transform
)  # uses the custom data.py: its ImageFolder returns the image path along with the image and label, making it easier to save results later
#test_dataset = torchvision.datasets.ImageFolder(root='/home/momo/mnt/data2/datum/raw/val2', transform=data_transform)
test_data = DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=4)
'''
/home/momo/data2/sun.zheng/flickr_style/test holds the raw test set.
Under /home/momo/sun.zheng/Recognizing_Image_Style/test_result_to_other a folder is created for each class;
each contains "right" and "wrong" subfolders holding the samples the saved model classifies correctly and
incorrectly, and the "wrong" folder records the specific class each sample was misclassified as.
'''

count = 0  # number of test images processed in the current class

for img1, label1, path1 in test_data:
    count = count + 1

    #img11 = img1.squeeze()  # squeeze() drops the size-1 dimension, turning a (1, 3, 224, 224) tensor into a (3, 224, 224) tensor
Example No. 37
	def test_basic(self):
		s = D.solve(4, 5)
		self.assertEqual(s, -1)
		print("basicOK")
Example No. 38
	def Get_VerticalSection(self, year, month, var, product_n):
		import D
		data = D.get_data(year, month, var, 0, product_n)
		Data, hgrid, zgrid = self.Get_VerticalSection_from_data(data, var, product_n)
		return Data, hgrid, zgrid
Example No. 39
This example will be more in-depth than the first few, but a lot of the principles that we have already applied also apply here. As always, we'll go through it step by step and I'll do my best to explain each part so that it makes sense and is as easy to follow as I can make it. In this final example, we will be looking at Generative Adversarial Networks - affectionately known as GANs. The concept of GANs was first introduced by Ian Goodfellow and his team in 2014 (https://arxiv.org/abs/1406.2661), where they "proposed a new framework for estimating generative models via an adversarial process". I'll get into this in much more detail, but essentially what is happening here is that we are going to train two neural networks (that will be adversaries), which will compete against one another in order to improve. One will be referred to as the Discriminator and the other will be known as the Generator. We combine both of these networks to form a combined model known as the GAN for training. Once training has been completed, we want to be able to use the *trained* Generator network independently to generate new things!

![Image](./Figures/gan2.png)

The image above looks rather unassuming; it is simply a row of portraits of four different people. The interesting thing, however, is that none of these people actually exist. They are not real. Each of these images has been generated by a Generative Adversarial Network known as StyleGAN. StyleGAN is a sophisticated GAN that has been curated and trained by NVIDIA; it represents the state of the art in data-driven unconditional generative image modelling and is an impressive testament to the possibilities of Generative Networks. Here is another video that demonstrates the capabilities of these methods (which is only 2 minutes long so I recommend you watch it because it's v cool) - https://www.youtube.com/watch?v=p5U4NgVGAwg. With that being said, let's take a closer look at how these things actually work.

## Generative Adversarial Networks

![Image](./Figures/gan1.png)

The Generative Adversarial Network is a framework for estimating generative models via an adversarial process in which two neural networks compete against each other during training. It is a useful machine learning technique that learns to generate fake samples indistinguishable from real ones via a competitive game. Whilst this may sound a little confusing, the GAN is nothing more than a combined model where two neural networks are joined together; these are known as the Discriminator $D$ and the Generator $G$. The Discriminator $D$ is a classification network that is set up to maximise the probability of assigning the correct label to real (label 1) or fake (label 0) samples. Meanwhile, the Generator $G$ is trying to fool $D$ and generate new samples that the Discriminator believes came from the training set. Mathematically speaking, this corresponds to the following two-player minimax game with value function $V(G,D)$:

![Image](./Figures/minmax.png)
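In symbols (transcribing the objective shown above, in the standard form from Goodfellow et al., 2014):

$$\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\mathrm{data}}(x)}\big[\log D(x)\big] + \mathbb{E}_{z \sim p_z(z)}\big[\log\big(1 - D(G(z))\big)\big]$$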

Where $x$ is the input to $D$ from the training set, $z$ is a vector of latent values input to $G$, $E_x$ is the expected value over all real data instances, $D(x)$ is the Discriminator's estimate of the probability that real data instance $x$ is real, $E_z$ is the expected value over all random inputs to the generator and $D(G(z))$ is the Discriminator's estimate of the probability that a fake instance is real. The diagram above should help this bit make sense. To reiterate, the primary goal of $G$ is to fool $D$ and generate new samples that $D$ believes came from the training set (real). The primary goal of $D$ is to correctly classify real/fake samples by assigning a label of 0 to generated samples, indicating a fake, and a label of 1 to true samples, indicating that a sample is real and came from the training set. The training procedure for $G$ is to maximise the probability of $D$ making a mistake, i.e. an incorrect classification. In the space of arbitrary functions $G$ and $D$, a unique solution exists, with $G$ able to reproduce data with the same distribution as the training set and the output from $D$ ≈ 0.5 for all samples, which simply indicates that the Discriminator can no longer differentiate between the training data and the data generated by $G$. Or in other words, the Generator $G$ has become so good at generating 'fake' data that $D$ can no longer tell the difference. The image below is taken from Google's training documentation about GANs and is worth a read as it (obviously) does a good job at explaining some of these concepts (https://developers.google.com/machine-learning/gan).

![Image](./Figures/forge.png)
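To make the training procedure concrete, here is a minimal sketch of one GAN training step using `tf.keras`. The layer sizes, optimiser, latent dimension and the assumption that each sample is an $(x, \sin(x))$ pair (matching the training set described below) are all illustrative choices, not something prescribed by the text:

```python
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

latent_dim = 8  # assumed size of the latent vector z

# Discriminator D: maps a sample to the probability that it is real
discriminator = models.Sequential([
    layers.Dense(32, activation='relu', input_shape=(2,)),
    layers.Dense(1, activation='sigmoid'),
])
discriminator.compile(optimizer='adam', loss='binary_crossentropy')

# Generator G: maps a latent vector z to a fake sample
generator = models.Sequential([
    layers.Dense(32, activation='relu', input_shape=(latent_dim,)),
    layers.Dense(2),
])

# Combined model: freeze D here so that training the GAN only updates G,
# pushing D(G(z)) towards the "real" label
discriminator.trainable = False
gan = models.Sequential([generator, discriminator])
gan.compile(optimizer='adam', loss='binary_crossentropy')

def train_step(real_batch):
    n = len(real_batch)
    z = np.random.normal(size=(n, latent_dim))
    fake_batch = generator.predict(z, verbose=0)
    # D is trained to label real samples 1 and generated samples 0
    d_loss_real = discriminator.train_on_batch(real_batch, np.ones((n, 1)))
    d_loss_fake = discriminator.train_on_batch(fake_batch, np.zeros((n, 1)))
    # G is trained (through the frozen D) to make D output 1 on fakes
    g_loss = gan.train_on_batch(np.random.normal(size=(n, latent_dim)), np.ones((n, 1)))
    return d_loss_real + d_loss_fake, g_loss
```

Note the ordering: the Discriminator is compiled on its own *before* being frozen inside the combined model, so it still updates when trained directly but stays fixed while the Generator learns.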



## Training Set

First of all, we need to decide what we want our generative network to generate. Of course, NVIDIA's sophisticated StyleGAN is capable of generating human faces, but GANs are capable of generating new data regardless of the form that it comes in. GANs can be used to generate new audio signals, new images, new time-series data etc. GANs are capable of generating new data that is representative of the data that they were trained on (the training set). Therefore, a key factor in the success of the GAN model lies largely in the quality of the training set. In this example, we will create a simple training set from the function $y = \sin(x)$ and use the trained generator to produce similar values!

```{note}
Throughout this example I may use terms such as 'real' and 'fake' when referring to data. 'Real' refers to data samples that come from the training set, and 'fake' refers to any data that is produced by the Generator.
```
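As a concrete sketch of the training set just described (the sample count and x-range are illustrative assumptions):

```python
import numpy as np

def make_training_set(n_samples=1024):
    # each training sample is an (x, sin(x)) pair drawn from the target curve
    x = np.random.uniform(0, 2 * np.pi, size=(n_samples, 1))
    return np.hstack([x, np.sin(x)])

real_samples = make_training_set()
print(real_samples.shape)  # (1024, 2)
```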

### Import Libraries
Example No. 40
def get_MeridionalGradient_of_variable(year, month, var, product_n = 3):
	data = D.get_data(year, month, var, 0, product_n)
	_, Data = get_Gradient_of_variable_from_data(data, var, product_n = product_n)
	return Data
Example No. 41
data_transform = transforms.Compose([
    transforms.ToTensor(),  # convert to a [C, H, W] torch.FloatTensor with values in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])  # normalize each channel: Normalized_image = (image - mean) / std
])

# load the model
print('load model begin!')
model = torch.load('/home/momo/sun.zheng/pytorch_imagenet/model_f.pkl')
model.eval()  # freeze batchnorm, dropout, etc. for inference
model = model.to(device)
print('load model done!')


# load the test data from the dataset
test_dataset = D.ImageFolder(root='/home/momo/mnt/data2/datum/raw/val2', transform=data_transform)  # uses the custom data.py: its ImageFolder returns the image path along with the image and label, making it easier to save results later
#test_dataset = torchvision.datasets.ImageFolder(root='/home/momo/mnt/data2/datum/raw/val2', transform=data_transform)
test_data = DataLoader(test_dataset, batch_size=1, shuffle=True, num_workers=4)

'''
/home/momo/sun.zheng/pytorch_imagenet/classify_test_data_result/val2 holds the raw test set.
Under /home/momo/sun.zheng/pytorch_imagenet/classify_test_data_result/test_result a folder is created for each class;
each contains "right" and "wrong" subfolders for the samples the saved model classifies correctly and incorrectly.
'''


count = 0  # number of test images processed in the current class (at most 1,450,000)



for img1, label1, path1 in test_data:
Example No. 42
def get_depth_of_minimum_of_vertical_gradient(year, month, var, product_n = 3):
	data = D.get_data(year, month, var, 0, product_n)
	Depth_of_GradMin = get_depth_of_minimum_of_vertical_gradient_from_data(data, var, product_n = product_n)
	return Depth_of_GradMin
Example No. 43
import D

oggi = D.D(25, 2, 2021)
oggi.out()

#oggi.mod(30,2,2021)

#oggi = D.D(1,25,2021)
Example No. 44
import time

from A import book

v1 = input('Enter Book name : ')
v2 = input('Enter Book cost : ')
print()
book(v1, v2)

input()
time.sleep(3)
# 3rd

print()
print()

import B as ks
v1 = float(input('Enter amount to deposit : '))
print()
ks.deposite(v1)

input()
time.sleep(3)
# 4th
print()
print()

import D
v1 = float(input('Enter amount to withdraw : '))
print()
D.withdraw(v1)