Example #1
def parseresultsfromdiskioserial(md,filename):    # {{{
	#Open file
	try:
		fid=open(filename,'rb')
	except IOError as e:
		raise IOError("loadresultsfromdisk error message: could not open '%s' for binary reading." % filename)

	#initialize results: 
	saveres=[]

	#Read fields until the end of the file.
	loadres=ReadData(fid,md)

	counter=0
	check_nomoresteps=0
	step=loadres['step']

	while loadres:
		#check that the new result does not add a step, which would be an error: 
		if check_nomoresteps:
			if loadres['step']>=1:
				raise TypeError("parsing results for a steady-state core, which incorporates transient results!")

		#Check step, increase counter if this is a new step
		if(step!=loadres['step'] and loadres['step']>1):
			counter = counter + 1
			step    = loadres['step']

		#Add result
		if loadres['step']==0:
			#if we have a step = 0, this is a steady state solution, don't expect more steps. 
			index = 0
			check_nomoresteps=1
		elif loadres['step']==1:
			index = 0
		else:
			index = counter
		
		if index > len(saveres)-1:
			for i in xrange(len(saveres)-1,index-1):
				saveres.append(None)
			saveres.append(resultsclass.results())
		elif saveres[index] is None:
			saveres[index]=resultsclass.results()
			
		#Get time and step
		if loadres['step'] != -9999.:
			saveres[index].__dict__['step']=loadres['step']
		if loadres['time'] != -9999.:
			saveres[index].__dict__['time']=loadres['time']

		#Add result
		saveres[index].__dict__[loadres['fieldname']]=loadres['field']

		#read next result
		loadres=ReadData(fid,md)

	fid.close()

	return saveres
Example #2
 def run(self):
     flag = True
     result = {}
     while flag:
         if pegasus_path !="":
             #Use subprocess to call pegasus-status command
             p = subprocess.Popen([r'pegasus-status', pegasus_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
             output = p.communicate()
             # communicate() returns a (stdout, stderr) tuple; the status
             # text is in the first element, as a string
             s = (output[0].split("%DONE\n")[1]).split("Summary")[0]
             #Parsing the result of pegasus-status
             vs = re.findall(r'\d+', s)
             # Order of the parsed results:
             # UNREADY READY PRE IN_Q POST DONE FAIL %DONE STATE DAGNAME
             result['unready'] = int(vs[0])
             result['ready'] = int(vs[1])
             result['pre'] = int(vs[2])
             result['in_queue'] = int(vs[3])
             result['post'] = int(vs[4])
             result['done'] = int(vs[5])
             result['fail'] = int(vs[6])
             result['percent_done'] = float(vs[7])
             print result
             #Send the result to the NaradaMetrics CIS
             results(result)
             
         time.sleep(5)
         if result['percent_done'] == 100.0:
             flag = False
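The parse above is tied to the textual layout of pegasus-status. A hedged sketch of the same regex applied to a made-up summary line (the column values are invented):

import re

line = " 0     0     0     2    10     0  83.3 Running *mydag.dag"
vs = re.findall(r'\d+', line)
# r'\d+' splits a decimal %DONE ("83.3") into two tokens, so the integer
# part lands where this parser expects the percentage.
print(vs)  # ['0', '0', '0', '2', '10', '0', '83', '3']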
Example #3
def titanic_pipeline():
    train, test = loaddata()
    train_proc, test_proc = dataprocessing(train, test)
    train_feat, train_labels = featureengineering(train_proc, test_proc)

    rf_acc = randomforest(train_feat, train_labels)
    svm_acc = svm(train_feat, train_labels)
    lg_acc = logistic_regression(train_feat, train_labels)

    results(svm_acc, lg_acc, rf_acc)
Example #4
def loadresultsfromdisk(md,filename):
	"""
	LOADRESULTSFROMDISK - load results of solution sequence from disk file "filename"            
 
	   Usage:
	      md=loadresultsfromdisk(md,filename)
	"""

	#check number of inputs/outputs
	if not md or not filename:
		raise ValueError("loadresultsfromdisk: error message.")

	if not md.qmu.isdakota:

		#Check that file exists
		if not os.path.exists(filename):
			raise OSError("binary file '%s' not found." % filename)

		#initialize md.results if not a structure yet
		if not isinstance(md.results,results):
			md.results=results()

		#load results onto model
		structure=parseresultsfromdisk(filename,not md.settings.io_gather)
		if not len(structure):
			raise RuntimeError("No result found in binary file '%s'. Check for solution crash." % filename)
		setattr(md.results,structure[0].SolutionType,structure)

		#recover solution_type from results
		md.private.solution=structure[0].SolutionType

		#read log files onto fields
		if os.path.exists(md.miscellaneous.name+'.errlog'):
			with open(md.miscellaneous.name+'.errlog','r') as f:
				setattr(getattr(md.results,structure[0].SolutionType)[0],'errlog',[line[:-1] for line in f])
		else:
			setattr(getattr(md.results,structure[0].SolutionType)[0],'errlog',[])

		if os.path.exists(md.miscellaneous.name+'.outlog'):
			with open(md.miscellaneous.name+'.outlog','r') as f:
				setattr(getattr(md.results,structure[0].SolutionType)[0],'outlog',[line[:-1] for line in f])
		else:
			setattr(getattr(md.results,structure[0].SolutionType)[0],'outlog',[])

		if len(getattr(md.results,structure[0].SolutionType)[0].errlog):
			print ("loadresultsfromcluster info message: error during solution. Check your errlog and outlog model fields.")

		#if only one solution, extract it from list for user friendliness
		if len(structure) == 1 and not m.strcmp(structure[0].SolutionType,'TransientSolution'):
			setattr(md.results,structure[0].SolutionType,structure[0])

	#post processes qmu results if necessary
	else:

		if not isinstance(md.private.solution,str):
			[md.private.solution]=EnumToString(md.private.solution)
		md=postqmu(md)
		os.chdir('..')

	return md
Example #5
    def do_GET(self):
        if self.path == '/':
            return self.initial_redirect()
        if self.path[:1] != '/':
            return self.send_error(
                400, "Path %s does not begin with /" % `self.path`)

        try:
            id, req = string.split(self.path[1:], '/', 1)
        except ValueError:
            return self.send_error(404, "Missing id and path")

        if not req:
            # Initial request. Need to get a file list
            return self.list_tests(id)
        elif req == 'report':
            self.send_response(200)
            self.send_header('Content-Type', "text/plain")
            self.end_headers()
            res = results.results(id)
            res.write_report(self.wfile)
            del res
            return
        else:
            return self.handle_request(id, req)
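The id/path split in do_GET, in isolation (Python 3 syntax here; the original uses the Python 2 string module):

path = "/42/report"
ident, req = path[1:].split('/', 1)
assert (ident, req) == ("42", "report")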
Example #6
def train_model(num_frames):
    env = make_atari('PongNoFrameskip-v4')
    env = wrap_deepmind(env,episode_life=True, frame_stack=True)
    train_results = results.results(globals())

    cumulative_frames = 0
    best_score = -50
    games = 0
    full_loss = []
    rewards = []
    while 1:
        state = env.reset()
        done = False
        cum_reward = 0
        cum_loss = []
        while not done:
            action = select_action(torch.tensor(np.array(state).reshape(-1, 4, HEIGHT, WIDTH)).to(device), cumulative_frames)

            next_state, reward, done, _ = env.step(action)

            memory.add(state, action, reward, next_state, done)  # last slot is the done flag

            state = next_state
            if cumulative_frames % TRAIN_FREQUENCY == 0 and cumulative_frames > LEARNING_STARTS:
                loss = optimize_model(cumulative_frames)
                cum_loss.append(loss)
            
            cum_reward += reward
            cumulative_frames += 1
        
            if cumulative_frames % TARGET_UPDATE == 0:
                target_net.load_state_dict(policy_net.state_dict())

        if best_score < cum_reward:
            best_score = cum_reward
        if len(cum_loss) == 0:
            full_loss.append(0)
        else:
            full_loss.append(np.mean(cum_loss))
        rewards.append(cum_reward)
        games += 1

        if games % 10 == 0:
            print("=============================================")
            print("Game: {} | Frame {}".format(games, cumulative_frames))
            print("Final reward: {}".format(cum_reward))
            print("Epsilon after: {}".format(EPSILON))
            print("Best High Score: {}".format(best_score))
            print("Avg Loss Last 100 games: {}".format(
                np.mean(full_loss[-100:])))
            print("Avg Reward Last 100 games: {}".format(
                np.mean(rewards[-100:])))

        train_results.record(cumulative_frames, games, EPSILON, cum_reward, full_loss[-1])

        if np.mean(rewards[-100:]) >= 18 and cumulative_frames > LEARNING_STARTS:
            break

    torch.save(target_net.state_dict(), PATH)
    train_results.close()
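select_action is referenced but not shown in this snippet. A minimal epsilon-greedy sketch of what it is assumed to do (the annealing schedule, n_actions, and the global policy_net are assumptions, not taken from the original):

import random
import torch

def select_action(state, frame, n_actions=6,
                  eps_start=1.0, eps_end=0.02, eps_decay=100_000):
    # Linearly anneal epsilon with the frame count, then act greedily
    # with probability 1 - epsilon using the (assumed global) policy_net.
    eps = max(eps_end, eps_start - frame * (eps_start - eps_end) / eps_decay)
    if random.random() < eps:
        return random.randrange(n_actions)
    with torch.no_grad():
        return policy_net(state).argmax(dim=1).item()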
Example #7
def run(test_fold):
    useGIS = False
    featnames, dacc, dgps, dgis = getFeatNames(featDir, gis=useGIS)
    records = [z for z in os.listdir(featDir) if isdir(join(featDir, z))]
    teRec = records[test_fold]
    trRec = records[:test_fold] + records[test_fold + 1:]
    print "test data", teRec
    # load test data
    print "Loading test data"
    teFeats, teLabels = load([teRec], gis=useGIS)
    # format labels
    teL, cnts, modes = labels.getSenseCamLabel(teLabels)
    # load training data
    print "Loading training data"
    trFeats, trLabels = load(trRec, gis=useGIS)
    # format labels
    trL, cnts, modes = labels.getSenseCamLabel(trLabels)
    # normalize features
    trFeats, m, s = normalize(trFeats)
    teFeats = normalize(teFeats, m, s)
    # train random forest
    idx1 = (trL > 0).nonzero()[0]
    idx2 = (teL > 0).nonzero()[0]
    rf = train_rf(trFeats[idx1,:], trL[idx1,0], nTrees=50, nFeats=25)
    y_pr = rf.predict(teFeats)
    y_po = rf.predict_proba(teFeats)
    print "Test accuracy", rf.score(teFeats[idx2,:], teL[idx2,0])
    Rout = results.results(teL[idx2,0], y_pr[idx2], modes)
    Rout.print_results()
Example #8
    def do_GET(self):
        if self.path == '/':
            return self.initial_redirect()
        if self.path[:1] != '/':
            return self.send_error(400,
                                   "Path %s does not begin with /" % `self.path`)

        try:
            id, req = string.split(self.path[1:], '/', 1)
        except ValueError:
            return self.send_error(404, "Missing id and path")

        if not req:
            # Initial request. Need to get a file list
            return self.list_tests(id)
        elif req == 'report':
            self.send_response(200)
            self.send_header('Content-Type', "text/plain")
            self.end_headers()
            res = results.results(id)
            res.write_report(self.wfile)
            del res
            return
        else:
            return self.handle_request(id, req)
Example #9
def run(test_fold):
    useGIS = False
    featnames, dacc, dgps, dgis = getFeatNames(featDir, gis=useGIS)
    records = [z for z in os.listdir(featDir) if isdir(join(featDir, z))]
    teRec = records[test_fold]
    trRec = records[:test_fold] + records[test_fold + 1:]
    print "test data", teRec
    # load test data
    print "Loading test data"
    teFeats, teLabels = load([teRec], gis=useGIS)
    # format labels
    teL, cnts, modes = labels.getSenseCamLabel(teLabels)
    # load training data
    print "Loading training data"
    trFeats, trLabels = load(trRec, gis=useGIS)
    # format labels
    trL, cnts, modes = labels.getSenseCamLabel(trLabels)
    # normalize features
    trFeats, m, s = normalize(trFeats)
    teFeats = normalize(teFeats, m, s)
    # train random forest
    idx1 = (trL > 0).nonzero()[0]
    idx2 = (teL > 0).nonzero()[0]
    rf = train_rf(trFeats[idx1, :], trL[idx1, 0], nTrees=50, nFeats=25)
    y_pr = rf.predict(teFeats)
    y_po = rf.predict_proba(teFeats)
    print "Test accuracy", rf.score(teFeats[idx2, :], teL[idx2, 0])
    Rout = results.results(teL[idx2, 0], y_pr[idx2], modes)
    Rout.print_results()
Example #10
def parseresultsfromdiskiosplit(md,filename):    # {{{

	#Open file
	try:
		fid=open(filename,'rb')
	except IOError as e:
		raise IOError("loadresultsfromdisk error message: could not open '%s' for binary reading." % filename)

	saveres=[]

	#if we have done split I/O, ie, we have results that are fragmented across patches, 
	#do a first pass, and figure out the structure of results
	loadres=ReadDataDimensions(fid)
	while loadres:

		#Get time and step
		if loadres['step'] > len(saveres):
			for i in xrange(len(saveres),loadres['step']-1):
				saveres.append(None)
			saveres.append(resultsclass.results())
		setattr(saveres[loadres['step']-1],'step',loadres['step'])
		setattr(saveres[loadres['step']-1],'time',loadres['time']) 

		#Add result
		setattr(saveres[loadres['step']-1],loadres['fieldname'],float('NaN'))

		#read next result
		loadres=ReadDataDimensions(fid)

	#do a second pass, and figure out the size of the patches
	fid.seek(0)    #rewind
	loadres=ReadDataDimensions(fid)
	while loadres:

		#read next result
		loadres=ReadDataDimensions(fid)

	#third pass, this time to read the real information
	fid.seek(0)    #rewind
	loadres=ReadData(fid,md)
	while loadres:

		#Get time and step
		if loadres['step']> len(saveres):
			for i in xrange(len(saveres),loadres['step']-1):
				saveres.append(None)
			saveres.append(resultsclass.results())
		setattr(saveres[loadres['step']-1],'step',loadres['step'])
		setattr(saveres[loadres['step']-1],'time',loadres['time']) 

		#Add result
		setattr(saveres[loadres['step']-1],loadres['fieldname'],loadres['field'])

		#read next result
		loadres=ReadData(fid,md)

	#close file
	fid.close()

	return saveres
Example #11
 def __init__(self, dbname=None):
     self.__checkmakedef()
     if dbname == None:
         self.con = sqlite3.connect("results.db")
     else: 
         self.con = sqlite3.connect(dbname)
     self.con.check_same_thread = False
     self.con.enable_callback_tracebacks = True
     self.cur = self.con.cursor()
     self.cur.execute("PRAGMA auto_vacuum = 1;")
     self.gentestid = gentestid(self.cur, "gentestid")
     self.results = results(self.cur, "results", self.gentestid)
     ref = (self.gentestid, self.results)
     self.create = create(self.cur, "create_", ref, "create")
     self.setattr = setattr(self.cur, "setattr", ref, "setattr")
     self.getattr = getattr(self.cur, "getattr", ref, "getattr")
     self.list = getattr(self.cur, "list", ref, "list")
     self.listattr = listattr(self.cur, "listattr", ref, "list")
     self.query = query(self.cur, "query", ref, "query")
     self.sma = sma(self.cur, "sma", ref, "set_member_attributes")
     self.coll = coll(self.cur, "coll", ref, "time-db")
     self.obj = obj(self.cur, "obj", ref, "time-db")
     self.attr = attr(self.cur, "attr", ref, "time-db")
     self.date = time.strftime('%F-%T')
     v = commands.getoutput('svn info | grep "^Rev" | cut -f2 -d\ ')
     self.version = int(v)
Example #12
 def __init__(self, dbname=None):
     self.__checkmakedef()
     if dbname == None:
         self.con = sqlite3.connect("results.db")
     else:
         self.con = sqlite3.connect(dbname)
     self.con.check_same_thread = False
     self.con.enable_callback_tracebacks = True
     self.cur = self.con.cursor()
     self.cur.execute("PRAGMA auto_vacuum = 1;")
     self.gentestid = gentestid(self.cur, "gentestid")
     self.results = results(self.cur, "results", self.gentestid)
     ref = (self.gentestid, self.results)
     self.create = create(self.cur, "create_", ref, "create")
     self.setattr = setattr(self.cur, "setattr", ref, "setattr")
     self.getattr = getattr(self.cur, "getattr", ref, "getattr")
     self.list = getattr(self.cur, "list", ref, "list")
     self.listattr = listattr(self.cur, "listattr", ref, "list")
     self.query = query(self.cur, "query", ref, "query")
     self.sma = sma(self.cur, "sma", ref, "set_member_attributes")
     self.coll = coll(self.cur, "coll", ref, "time-db")
     self.obj = obj(self.cur, "obj", ref, "time-db")
     self.attr = attr(self.cur, "attr", ref, "time-db")
     self.date = time.strftime('%F-%T')
     v = commands.getoutput('svn info | grep "^Rev" | cut -f2 -d\ ')
     self.version = int(v)
Example #13
File: model.py Project: pf4d/issm
    def __init__(self):  #{{{

        # classtype=model.properties

        # for classe in dict.keys(classtype):
        # 	print classe
        # 	self.__dict__[classe] = classtype[str(classe)]

        self.mesh = mesh2d()
        self.mask = mask()
        self.geometry = geometry()
        self.constants = constants()
        self.smb = SMBforcing()
        self.basalforcings = basalforcings()
        self.materials = matice()
        self.damage = damage()
        self.friction = friction()
        self.flowequation = flowequation()
        self.timestepping = timestepping()
        self.initialization = initialization()
        self.rifts = rifts()
        self.slr = slr()

        self.debug = debug()
        self.verbose = verbose()
        self.settings = settings()
        self.toolkits = toolkits()
        self.cluster = generic()

        self.balancethickness = balancethickness()
        self.stressbalance = stressbalance()
        self.groundingline = groundingline()
        self.hydrology = hydrologyshreve()
        self.masstransport = masstransport()
        self.thermal = thermal()
        self.steadystate = steadystate()
        self.transient = transient()
        self.levelset = levelset()
        self.calving = calving()
        self.gia = giaivins()

        self.autodiff = autodiff()
        self.inversion = inversion()
        self.qmu = qmu()
        self.amr = amr()

        self.results = results()
        self.outputdefinition = outputdefinition()
        self.radaroverlay = radaroverlay()
        self.miscellaneous = miscellaneous()
        self.private = private()
Example #14
    def btn_New_Patient_clicked(self):
        """Called when the ADD PATIENT button is pressed.

        Reads the data-section and threshold widget values, then builds the
        patient's binary correlation matrix. Takes no parameters and returns
        None.
        """
        print("log - New Patient - clicked")  # just log the click
        data_section = self.spinBoxDSec.value()
        threshold = self.doubleBoxTreshold.value()
        print(data_section, " - data section")
        patient_result = results("Uri")

        res = patient_result.data_section(data_section)
        resu_corr = patient_result.correlation_matrix(res)
        patient_result.binary_matrix(resu_corr, threshold)
Example #15
def test_results():
    path = 'results/test/test.pkl'
    m = KnnClassifier()
    d = Toy() 
    s = CrossValidation()
    e = [Accuracy()]
    p = expConfig(dataset=d,
                  setting=s,
                  model=m,
                  metrics=e,
                  resultPath=path)
    p.skip_if_file_exist = False
    p.run()

    r = results(root_dir='results/test')
    x = r.load()
    assert_almost_equals(x[0].metrics[0].values[2], 0.8) 
Example #16
def parseresultsfromdiskiosplit(filename):    # {{{
	"""
	PARSERESULTSFROMDISKIOSPLIT - ...
	 
	    Usage:
	       results=parseresultsfromdiskiosplit(filename)
	"""

	#Open file
	try:
		fid=open(filename,'rb')
	except IOError as e:
		raise IOError("loadresultsfromdisk error message: could not open '%s' for binary reading." % filename)

	results=[]

	#if we have done split I/O, ie, we have results that are fragmented across patches, 
	#do a first pass, and figure out the structure of results
	result=ReadDataDimensions(fid)
	while result:

		#Get time and step
		if result['step'] > len(results):
			for i in range(len(results),result['step']-1):
				results.append(None)
			results.append(resultsclass.results())
		setattr(results[result['step']-1],'step',result['step'])
		setattr(results[result['step']-1],'time',result['time']) 

		#Add result
		setattr(results[result['step']-1],result['fieldname'],float('NaN'))

		#read next result
		result=ReadDataDimensions(fid)

	#do a second pass, and figure out the size of the patches
	fid.seek(0)    #rewind
	result=ReadDataDimensions(fid)
	while result:

		#read next result
		result=ReadDataDimensions(fid)

	#third pass, this time to read the real information
	fid.seek(0)    #rewind
	result=ReadData(fid)
	while result:

		#Get time and step
		if result['step']> len(results):
			for i in range(len(results),result['step']-1):
				results.append(None)
			results.append(resultsclass.results())
		setattr(results[result['step']-1],'step',result['step'])
		setattr(results[result['step']-1],'time',result['time']) 

		#Add result
		setattr(results[result['step']-1],result['fieldname'],result['field'])

		#read next result
		result=ReadData(fid)

	#close file
	fid.close()

	return results
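The None-padding idiom used in all three passes, reduced to a standalone sketch:

steps = []
step = 4                                  # 1-based step number from the file
if step > len(steps):
    steps.extend([None] * (step - 1 - len(steps)))
    steps.append(object())                # stand-in for resultsclass.results()
assert len(steps) == 4 and steps[3] is not None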
Example #17
if input_cipher == 'C':
    message = caessar.caessar(input_text, encoding_key, input_alphabet)
    if input_encode_decode == 'E':
        result = message.encode()
    elif input_encode_decode == 'D':
        result = message.decode()

if input_cipher == 'Af':
    message = affine.affine(input_text, encoding_key1, encoding_key2,
                            input_alphabet)
    if input_encode_decode == 'E':
        result = message.encode()
    elif input_encode_decode == 'D':
        result = message.decode()

if input_cipher == 'At':
    message = atbash.atbash(input_text, input_alphabet)
    if input_encode_decode == 'E':
        result = message.encode()
    elif input_encode_decode == 'D':
        result = message.decode()

if input_cipher == 'V':
    message = vigenere.vigenere(input_text, encoding_keyword, input_alphabet)
    if input_encode_decode == 'E':
        result = message.encode()
    elif input_encode_decode == 'D':
        result = message.decode()

results.results(input_text, input_encode_decode, input_cipher, result)
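Each branch above has the same shape; a hedged sketch of the same dispatch as a table. It reuses the snippet's own names (so it only runs in that context) and collapses the E/D choice into one expression:

builders = {
    'C':  lambda: caessar.caessar(input_text, encoding_key, input_alphabet),
    'Af': lambda: affine.affine(input_text, encoding_key1, encoding_key2,
                                input_alphabet),
    'At': lambda: atbash.atbash(input_text, input_alphabet),
    'V':  lambda: vigenere.vigenere(input_text, encoding_keyword,
                                    input_alphabet),
}
message = builders[input_cipher]()
result = message.encode() if input_encode_decode == 'E' else message.decode()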
Example #18
def stats():
    d = dict(
        zip(['Active Cases', 'Cured / Discharged', 'Deaths', 'Migrated'],
            results()))
    return render_template('front.html', data=d)
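results() is assumed to return the four counts in label order; the dict(zip(...)) pairing in isolation, with hypothetical values:

labels = ['Active Cases', 'Cured / Discharged', 'Deaths', 'Migrated']
counts = [254, 1080, 32, 1]               # hypothetical values
d = dict(zip(labels, counts))
assert d['Deaths'] == 32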
Example #19
y_val = tf.keras.utils.to_categorical(y_val, 10)

# Generate the model with or without (default) dropout
model = MyModel(use_dp=True)
model = model.model()

options = tf.keras.optimizers.Adam(learning_rate=0.001,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-7)

model.compile(optimizer=options,
              loss=tf.losses.categorical_crossentropy,
              metrics=['accuracy'])

results(model, x_train, y_train, x_val, y_val, x_test, y_test)

# Save model features
tf.keras.utils.plot_model(model, to_file='mnist_model.png', show_shapes=True)
model.save('mnist_model.h5')

# Save individual config
config = model.get_config()
weights = model.get_weights()
with open('modelsummary.json', 'w') as f:
    with redirect_stdout(f):
        model.summary()

with open("config.json", "w") as file:
    json.dump(config, file)
Example #20
# "numpy.einsum" - Evaluates the Einstein summation convention on the operands.
# Using the Einstein summation convention, many common multi-dimensional, linear algebraic array operations can be represented in a simple fashion. In implicit mode einsum computes these values.
# In explicit mode, einsum provides further flexibility to compute other array operations that might not be considered classical Einstein summation operations, by disabling, or forcing summation over specified subscript labels.
a = numpy.arange(36).reshape(6, 6)
print(type(a))
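# Hedged illustration (not part of the original snippet): explicit mode
# ('ij->i' sums over j) versus implicit mode ('ii', which sums the diagonal,
# i.e. the trace).
print(numpy.einsum('ij->i', a))  # row sums, shape (6,)
print(numpy.einsum('ii', a))     # trace of a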
# b = pandas.DataFrame(numpy.einsum('ijk->ik',a.reshape(-1,3,a.shape[1]))/3.0)
# print()
# print(a)
# print(b)
# # print(a.shape[1])
# print()
# print(numpy.einsum('ijk->ik',a.reshape(-1,3,a.shape[1])))
# results_test.chooseFile()

# "Choose a file" scenario
result = results("Uri")
# result.uploadResults(result.chooseFile())

# "Choose a data section" scenario with default section = 3
# result.data_section()

# "Choose a data section" scenario with chosen section
# result.data_section(2) # Data Section = 2
# result.data_section(3) # Data Section = 3
# result.data_section(4) # Data Section = 4
res = result.data_section(5)  # Data Section = 5

# "Return the correlation matrix" scenario
resu_corr = result.correlation_matrix(res)
# print(resu_corr)
Example #21
from results import results
import matplotlib.pyplot as plt

r = results()
data = r.get_data_multi_exp([
    'reacher_sac_no_value_fnc_18-06-2019_10-17-44',
    'visual_reacher_new_lr_old_update_21-06-2019_21-34-40'
], ['symbolic', 'visual'])
r.plot_steps(data)
plt.show()
Example #22
wrt_file = 'log_order%s_%s%s_%s%s_%s.txt' % (order, ns_cal, set_id_cal, ns_val,
                                             set_id_val,
                                             time.strftime("%Y-%m-%d_h%Hm%M"))

### TOTAL CALIBRATION PROCEDURE ###

## Read the calibration microstructures and build the microstructure function
msf.msf(el_cal, ns_cal, Hi, H, order, set_id_cal, wrt_file)

## Read the responses from the FE .dat files and perform the fftn for the calibration
fegrab.fegrab(el_cal, ns_cal, set_id_cal, wrt_file)

## Perform the calibration
calibration.calibration_procedure(el_cal, ns_cal, H, set_id_cal, wrt_file)

### TOTAL VALIDATION PROCEDURE ###

## Read the validation microstructures and build the microstructure function
msf.msf(el_val, ns_val, Hi, H, order, set_id_val, wrt_file)

## Read the responses from the FE .dat files and perform the fftn for the validation
fegrab.fegrab(el_val, ns_val, set_id_val, wrt_file)

## Perform the validation
validation_viz.validation_zero_pad(el_cal, el_val, ns_cal, ns_val, H,
                                   set_id_cal, set_id_val, wrt_file)

## Calculate the results of the validation
results.results(el_val, ns_val, set_id_val, 'epsilon', wrt_file)
Example #23
from results import results
from snail import send_email
import setup

results(setup.number_of_days, setup.include_today, setup.number_of_runs)
send_email()
Example #24
def mechanicalproperties(md, vx, vy, **kwargs):
    """
    MECHANICALPROPERTIES - compute stress and strain rate for a given velocity

    This routine computes the components of the stress tensor and the
    strain rate tensor, and their respective principal directions.
    The results are stored in the model md: md.results

    Usage:
       md=mechanicalproperties(md,vx,vy)

    Example:
       md=mechanicalproperties(md,md.initialization.vx,md.initialization.vy)
       md=mechanicalproperties(md,md.inversion.vx_obs,md.inversion.vy_obs)
    """

    #some checks
    if len(vx) != md.mesh.numberofvertices or len(
            vy) != md.mesh.numberofvertices:
        raise ValueError('the input velocity should be of size %d' %
                         md.mesh.numberofvertices)

    #if md.mesh.dimension!=2:
    #	raise StandardError('only 2D model supported currently')

    if np.any(md.flowequation.element_equation != 2):
        print(
            'Warning: the model has some non SSA elements. These will be treated like SSA elements'
        )

    #unpack kwargs
    if 'damage' in kwargs:
        damage = kwargs.pop('damage')
        if len(damage) != md.mesh.numberofvertices:
            raise ValueError('if damage is supplied it should be of size %d' %
                             md.mesh.numberofvertices)
        if np.ndim(damage) == 2:
            damage = damage.reshape(-1, )
    else:
        damage = None

    if np.ndim(vx) == 2:
        vx = vx.reshape(-1, )
    if np.ndim(vy) == 2:
        vy = vy.reshape(-1, )

    #initialization
    numberofelements = md.mesh.numberofelements
    numberofvertices = md.mesh.numberofvertices
    index = md.mesh.elements
    summation = np.array([[1], [1], [1]])
    directionsstress = np.zeros((numberofelements, 4))
    directionsstrain = np.zeros((numberofelements, 4))
    valuesstress = np.zeros((numberofelements, 2))
    valuesstrain = np.zeros((numberofelements, 2))

    #compute nodal functions coefficients N(x,y)=alpha x + beta y +gamma
    alpha, beta = GetNodalFunctionsCoeff(index, md.mesh.x, md.mesh.y)[0:2]

    #compute shear
    vxlist = vx[index - 1] / md.constants.yts
    vylist = vy[index - 1] / md.constants.yts
    ux = np.dot((vxlist * alpha), summation).reshape(-1, )
    uy = np.dot((vxlist * beta), summation).reshape(-1, )
    vx = np.dot((vylist * alpha), summation).reshape(-1, )
    vy = np.dot((vylist * beta), summation).reshape(-1, )
    uyvx = (vx + uy) / 2.
    #clear vxlist vylist

    #compute viscosity
    nu = np.zeros((numberofelements, ))
    B_bar = np.dot(md.materials.rheology_B[index - 1],
                   summation / 3.).reshape(-1, )
    power = ((md.materials.rheology_n - 1.) /
             (2. * md.materials.rheology_n)).reshape(-1, )
    second_inv = (ux**2. + vy**2. + ((uy + vx)**2.) / 4. + ux * vy).reshape(
        -1, )

    #some corrections
    location = np.nonzero(np.logical_and(second_inv == 0, power != 0))
    nu[location] = 1e18  #arbitrary maximum viscosity to apply where there is no effective shear

    if 'matice' in md.materials.__module__:
        location = np.nonzero(second_inv)
        nu[location] = B_bar[location] / (second_inv[location]**
                                          power[location])
        location = np.nonzero(np.logical_and(second_inv == 0, power == 0))
        nu[location] = B_bar[location]
        location = np.nonzero(np.logical_and(second_inv == 0, power != 0))
        nu[location] = 1e18
    elif 'matdamageice' in md.materials.__module__ and damage is not None:
        print('computing damage-dependent properties!')
        Zinv = np.dot(1 - damage[index - 1], summation / 3.).reshape(-1, )
        location = np.nonzero(second_inv)
        nu[location] = Zinv[location] * B_bar[location] / np.power(
            second_inv[location], power[location])
        location = np.nonzero(np.logical_and(second_inv == 0, power == 0))
        nu[location] = Zinv[location] * B_bar[location]
        #clear Zinv
    else:
        raise Exception('class of md.materials (' + md.materials.__module__ +
                        ') not recognized or not supported')

    #compute stress
    tau_xx = nu * ux
    tau_yy = nu * vy
    tau_xy = nu * uyvx

    #compute principal properties of stress
    for i in np.arange(numberofelements):

        #compute stress and strainrate matrices
        stress = np.array([[tau_xx[i], tau_xy[i]], [tau_xy[i], tau_yy[i]]])
        strain = np.array([[ux[i], uyvx[i]], [uyvx[i], vy[i]]])

        #eigenvalues and vectors for stress
        value, directions = np.linalg.eig(stress)
        idx = abs(value).argsort()[::-1]  # sort in descending order
        value = value[idx]
        directions = directions[:, idx]
        valuesstress[i, :] = [value[0], value[1]]
        directionsstress[i, :] = directions.transpose().flatten()

        #eigenvalues and vectors for strain
        value, directions = np.linalg.eig(strain)
        idx = abs(value).argsort()[::-1]  # sort in descending order
        value = value[idx]
        directions = directions[:, idx]
        valuesstrain[i, :] = [value[0], value[1]]
        directionsstrain[i, :] = directions.transpose().flatten()

    ##plug onto the model
    ##NB: Matlab sorts the eigen value in increasing order, we want the reverse
    stress = results()
    stress.xx = tau_xx
    stress.yy = tau_yy
    stress.xy = tau_xy
    stress.principalvalue1 = valuesstress[:, 0]
    stress.principalaxis1 = directionsstress[:, 0:2]
    stress.principalvalue2 = valuesstress[:, 1]
    stress.principalaxis2 = directionsstress[:, 2:4]
    stress.effectivevalue = 1. / np.sqrt(2.) * np.sqrt(stress.xx**2 +
                                                       stress.yy**2 +
                                                       2. * stress.xy**2)
    md.results.stress = stress

    strainrate = results()
    strainrate.xx = ux * md.constants.yts  #strain rate in 1/a instead of 1/s
    strainrate.yy = vy * md.constants.yts
    strainrate.xy = uyvx * md.constants.yts
    strainrate.principalvalue1 = valuesstrain[:, 0] * md.constants.yts
    strainrate.principalaxis1 = directionsstrain[:, 0:2]
    strainrate.principalvalue2 = valuesstrain[:, 1] * md.constants.yts
    strainrate.principalaxis2 = directionsstrain[:, 2:4]
    strainrate.effectivevalue = 1. / np.sqrt(2.) * np.sqrt(
        strainrate.xx**2 + strainrate.yy**2 + 2. * strainrate.xy**2)
    md.results.strainrate = strainrate

    deviatoricstress = results()
    deviatoricstress.xx = tau_xx
    deviatoricstress.yy = tau_yy
    deviatoricstress.xy = tau_xy
    deviatoricstress.principalvalue1 = valuesstress[:, 0]
    deviatoricstress.principalaxis1 = directionsstress[:, 0:2]
    deviatoricstress.principalvalue2 = valuesstress[:, 1]
    deviatoricstress.principalaxis2 = directionsstress[:, 2:4]
    deviatoricstress.effectivevalue = 1. / np.sqrt(2.) * np.sqrt(
        stress.xx**2 + stress.yy**2 + 2. * stress.xy**2)
    md.results.deviatoricstress = deviatoricstress

    return md
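numpy does not order eigenvalues, so the loop above sorts them by decreasing magnitude and permutes the eigenvector columns to match. The idiom in isolation:

import numpy as np

m = np.array([[1., 3.], [3., 1.]])      # eigenvalues 4 and -2
value, directions = np.linalg.eig(m)
idx = abs(value).argsort()[::-1]        # descending |eigenvalue|
value = value[idx]
directions = directions[:, idx]         # keep columns paired with their values
assert abs(value[0]) >= abs(value[1])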
Example #25
    def handle_request(self, id, req):
        """Answer a request

        We first look for a file with the name of the request.
        If that exists, then we spit that out, otherwise we
        open req.headers and req.body (if available) separately.

        Why would you want to split it out?
        a) binary files
        b) Separating it out will send the 'standard' headers,
        and handle the Connection: details for you, if you're
        not testing that.
        c) You don't need to come up with your own body"""

        res = results.results(id)

        path = string.join(string.split(req, '/')[:-1], '/')

        path = path + '/'

        tester = res.get_tester(path)

        self.fname = string.split(req,'/')[-1]

        if not self.fname:
            self.fname = tester.baseName

        if not tester.verify_request(self):
            res.set_tester(req, tester)
            return self.send_error(400, tester.reason)

        ### perhaps this isn't the best design model...
        res.set_tester(path, tester)

        del res

        if req[-1:] == '/':
            req = req + tester.baseName
        
        try:
            f = open(req, 'rb')
            self.log_message('"%s" sent successfully for %s',
                             self.requestline,
                             id)
            self.copyfileobj(f,self.wfile)
            return f.close()
        except IOError:
            try:
                f = open(req+".headers", 'rb')
            except IOError:
                return self.send_error(404, "File %s not found" % (req))
        
        self.send_response(f.readline())
        # XXX - I should parse these out, and use send_header instead
        # so that I can change behaviour (like keep-alive...)
        # But then I couldn't test 'incorrect' header formats
        self.copyfileobj(f,self.wfile)
        f.close()

        try:
            f = open(req+".body", 'rb')
            ## XXXXX - Need to configify this
            ## and then send content-length, etc
            self.end_headers()
            self.copyfileobj(f, self.wfile)
            return f.close()
        except IOError:
            self.send_header('Content-Type', "text/plain")
            body = self.default_reply % (req, id)
            self.send_header('Content-Length', len(body))
            self.end_headers()
            self.wfile.write(body)
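The handler's lookup order (whole file, then .headers, then .body or a default reply) is a fallback-open pattern; a minimal Python 3 sketch of that pattern:

def open_first(*names):
    # Return the first of `names` that opens for binary reading, else None.
    for name in names:
        try:
            return open(name, 'rb')
        except IOError:
            continue
    return None

f = open_first('req', 'req.headers')  # hypothetical file names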
Example #26
	if variables["max_area"] == "unlimited":
		print ("Based on the occupancy group and construction type you chose, your building's allowable area per story"
			" is already unlimited, so we don't need to do any more calculations to try increasing it.\n")

	elif variables["max_area"] != "unlimited":
		print "Let's see if you can increase your building's allowable area per story.\n"
		building_area_increase(variables)


total_building_area(variables)

results(variables)

print "Your building's code analysis is complete. You can view your building's data in results.txt."
Example #27
    def handle_request(self, id, req):
        """Answer a request

        We first look for a file with the name of the request.
        If that exists, then we spit that out, otherwise we
        open req.headers and req.body (if available) separately.

        Why would you want to split it out?
        a) binary files
        b) Separating it out will send the 'standard' headers,
        and handle the Connection: details for you, if you're
        not testing that.
        c) You don't need to come up with your own body"""

        res = results.results(id)

        path = string.join(string.split(req, '/')[:-1], '/')

        path = path + '/'

        tester = res.get_tester(path)

        self.fname = string.split(req, '/')[-1]

        if not self.fname:
            self.fname = tester.baseName

        if not tester.verify_request(self):
            res.set_tester(req, tester)
            return self.send_error(400, tester.reason)

        ### perhaps this isn't the best design model...
        res.set_tester(path, tester)

        del res

        if req[-1:] == '/':
            req = req + tester.baseName

        try:
            f = open(req, 'rb')
            self.log_message('"%s" sent successfully for %s', self.requestline,
                             id)
            self.copyfileobj(f, self.wfile)
            return f.close()
        except IOError:
            try:
                f = open(req + ".headers", 'rb')
            except IOError:
                return self.send_error(404, "File %s not found" % (req))

        self.send_response(f.readline())
        # XXX - I should parse these out, and use send_header instead
        # so that I can change behaviour (like keep-alive...)
        # But then I couldn't test 'incorrect' header formats
        self.copyfileobj(f, self.wfile)
        f.close()

        try:
            f = open(req + ".body", 'rb')
            ## XXXXX - Need to configify this
            ## and then send content-length, etc
            self.end_headers()
            self.copyfileobj(f, self.wfile)
            return f.close()
        except IOError:
            self.send_header('Content-Type', "text/plain")
            body = self.default_reply % (req, id)
            self.send_header('Content-Length', len(body))
            self.end_headers()
            self.wfile.write(body)
Example #28
def parseresultsfromdiskioserial(filename):    # {{{
	"""
	PARSERESULTSFROMDISK - ...
	 
	    Usage:
	       results=parseresultsfromdiskioserial(filename)
	"""

	#Open file
	try:
		fid=open(filename,'rb')
	except IOError as e:
		raise IOError("loadresultsfromdisk error message: could not open '%s' for binary reading." % filename)

	#initialize results: 
	results=[]
	results.append(None)

	#Read fields until the end of the file.
	result=ReadData(fid)

	counter=0
	check_nomoresteps=0
	step=result['step']

	while result:

		if check_nomoresteps:
			#check that the new result does not add a step, which would be an error: 
			if result['step']>=1:
				raise TypeError("parsing results for a steady-state core, which incorporates transient results!")

		#Check step, increase counter if this is a new step
		if(step!=result['step'] and result['step']>1):
			counter = counter + 1
			step    = result['step']

		#Add result
		if result['step']==0:
			#if we have a step = 0, this is a steady state solution, don't expect more steps. 
			index = 0
			check_nomoresteps=1
	
		elif result['step']==1:
			index = 0
		else:
			index = counter
	
		if index > len(results)-1:
			for i in range(len(results)-1,index-1):
				results.append(None)
			results.append(resultsclass.results())
		
		elif results[index] is None:
			results[index]=resultsclass.results()

			
		#Get time and step
		if result['step'] != -9999.:
			setattr(results[index],'step',result['step'])
		if result['time'] != -9999.:
			setattr(results[index],'time',result['time']) 
	
		#Add result
		if hasattr(results[index],result['fieldname']) and not m.strcmp(result['fieldname'],'SolutionType'):
			setattr(results[index],result['fieldname'],numpy.vstack((getattr(results[index],result['fieldname']),result['field'])))
		else:
			setattr(results[index],result['fieldname'],result['field'])

		#read next result
		result=ReadData(fid)

	fid.close()

	return results
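The field-accumulation branch above stores a field as-is on first sight and stacks later occurrences row-wise. The idiom in isolation:

import numpy

field = numpy.array([1., 2.])      # first occurrence: stored as-is
update = numpy.array([3., 4.])     # later occurrence: stacked below it
field = numpy.vstack((field, update))
assert field.shape == (2, 2)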
Example #29
    # Convert the orientations from the validation datasets from bunge euler
    # angles to GSH coefficients
    gsh.euler_to_gsh(el, H, ns_val, set_id_val, step, wrt_file)

    # Perform the calibration
    tensor_ID = 1
    for comp in compl:
        calibration.calibration_procedure(el, H, ns_cal, set_id_cal, step,
                                          comp, 'epsilon_t', wrt_file)

    # Perform the validation
    for comp in compl:
        validation.validation(el, H, ns_cal, ns_val, set_id_cal, set_id_val,
                              step, comp, wrt_file)

    # Perform Blurring on Solutions
    for comp in compl:
        field_blur.blur(el, ns_val, set_id_val, step, 'epsilon_p', comp)

    comp_app = 0

    for comp in compl:
        results.results(el, ns_val, set_id_val, step, L, 'epsilon', comp, 't')

    for comp in compl:
        results.results(el, ns_val, set_id_val, step, L, 'epsilon', comp, 'p')

    for comp in compl:
        results.results(el, ns_val, set_id_val, step, L, 'epsilon', comp,
                        'p_b')
Example #30
ns_val = 10
set_id_val = 'val'
set_id_cal = 'cal'
wrt_file = 'log_file.txt'

X_cal = X[0:ns_cal, ...]
X_val = X[ns_cal:, ...]

y_cal = y[0:ns_cal, ...]
y_val = y[ns_cal:, ...]

# take fft of response fields
y_fft_cal = np.fft.fftn(y_cal, axes=[1, 2, 3])
y_fft_val = np.fft.fftn(y_val, axes=[1, 2, 3])

# Convert the orientations from the calibration datasets from bunge euler
# angles to GSH coefficients
M_cal = gsh.euler_to_gsh(X_cal, el, H, ns_cal, set_id_cal, wrt_file)

# Convert the orientations from the validation datasets from bunge euler
# angles to GSH coefficients
M_val = gsh.euler_to_gsh(X_val, el, H, ns_val, set_id_val, wrt_file)

# Perform the calibration
infl_coef = calibration.calibration_procedure(M_cal, y_fft_cal, el, H, ns_cal, wrt_file)

# Perform the validation
y_mks = validation.validation(M_val, infl_coef, el, wrt_file)

results.results(infl_coef, y_val, y_mks, el, ns_val)
Example #31
    gsh.euler_to_gsh(el, H, ns_val, set_id_val, step, wrt_file)
    """Generate the fftn of the validation microstructure function"""
    msf.micr_func(el, H, ns_val, set_id_val, step, wrt_file)
    """Read strain from validation .vtk files"""
    tensor_ID = 1
    for comp in compl:
        vtk_r.read_meas(el, ns_val, set_id_val, step, comp, tensor_ID, dir_val,
                        wrt_file)
    """Perform the validation"""
    for comp in compl:
        validation.validation(el, H, ns_cal, ns_val, set_id_cal, set_id_val,
                              step, comp, 'epsilon', wrt_file)
    """Perform the error checks"""
    wrt_file2 = 'test2.txt'
    for comp in compl:
        results.results(el, ns_val, set_id_val, step, 'epsilon', comp,
                        comp_app, wrt_file, wrt_file2)
    """CHECK STRAIN TO STRESS CONVERSION"""
    """Read stress from validation .vtk files"""
    tensor_ID = 0
    for comp in compl:
        vtk_r.read_meas(el, ns_val, set_id_val, step, comp, tensor_ID, dir_val,
                        wrt_file)
    """Convert strain to stress"""
    s2s.strain2stress(el, ns_val, set_id_val, step, wrt_file)
    """Perform the error checks"""
    wrt_file2 = 'test2.txt'
    for comp in compl:
        results.results(el, ns_val, set_id_val, step, 'sigma', comp, comp_app,
                        wrt_file, wrt_file2)

    # """CHECK WRITING RESULTS TO FILE"""
Example #32
from camera_frame import cam_frame
from compare import compare
from load_folders import load_folders
from load_hists import load_hists
from results import results

# change path variables according to
path_beans_train = '../Dataset/Train_Hists/beans_hist/'
path_hazelnuts_train = '../Dataset/Train_Hists/hazelnuts_hist/'
path_chickpeas_train = '../Dataset/Train_Hists/chickpeas_hist/'
path_blackeyed_train = '../Dataset/Train_Hists/blackeyed_hist/'
path_cashews_train = '../Dataset/Train_Hists/cashews_hist/'

beans_list, hazelnuts_list, chickpeas_list, blackeyed_list, cashews_list = load_folders(
    path_beans_train, path_hazelnuts_train, path_chickpeas_train,
    path_blackeyed_train, path_cashews_train)

beans_hist, hazelnuts_hist, chickpeas_hist, blackeyed_hist, cashews_hist = load_hists(
    beans_list, hazelnuts_list, chickpeas_list, blackeyed_list, cashews_list)
hist_test = cam_frame()
dists = compare(beans_hist, hazelnuts_hist, chickpeas_hist, blackeyed_hist,
                cashews_hist, hist_test)

results(dists)
Example #33
        Hset = Hset_
        msg = "coefficients L=0-4"
        rr.WP(msg, wrt_file)
        rr.WP(msg, res_file)
        print(Hset)
    else:
        Hset = np.hstack([Hset_, ii])
        msg = "coefficients L=0-4 and coeff %s" % ii
        rr.WP(msg, wrt_file)
        rr.WP(msg, res_file)
        print(Hset)

    # Convert the orientations from the calibration datasets from bunge euler
    # angles to GSH coefficients
    gsh.euler_to_gsh(el, Hset, ns_cal, set_id_cal, step, wrt_file)

    # Convert the orientations from the validation datasets from bunge euler
    # angles to GSH coefficients
    gsh.euler_to_gsh(el, Hset, ns_val, set_id_val, step, wrt_file)

    # Perform the calibration
    calibration.calibration_procedure(el, Hset.size, ns_cal, set_id_cal, step,
                                      comp, 'epsilon_t', wrt_file)

    # Perform the validation
    validation.validation(el, Hset.size, ns_cal, ns_val, set_id_cal,
                          set_id_val, step, comp, 'epsilon_t', wrt_file)

    results.results(el, ns_val, set_id_val, step, 'epsilon', comp, 't', ii,
                    res_file, res_file2)
Example #34
File: model.py Project: pf4d/issm
    def extract(md, area):  # {{{
        """
		extract - extract a model according to an Argus contour or flag list

		   This routine extracts a submodel from a bigger model with respect to a given contour
		   md must be followed by the corresponding exp file or flags list
		   It can either be a domain file (argus type, .exp extension), or an array of element flags. 
		   If user wants every element outside the domain to be 
		   extracted, add '~' to the name of the domain file (ex: '~HO.exp')
		   an empty string '' will be considered as an empty domain
		   a string 'all' will be considered as the entire domain

		   Usage:
		      md2=extract(md,area)

		   Examples:
		      md2=extract(md,'Domain.exp')

		   See also: EXTRUDE, COLLAPSE
		"""

        #copy model
        md1 = copy.deepcopy(md)

        #get elements that are inside area
        flag_elem = FlagElements(md1, area)
        if not np.any(flag_elem):
            raise RuntimeError("extracted model is empty")

        #kick out all elements with 3 dirichlets
        spc_elem = np.nonzero(np.logical_not(flag_elem))[0]
        spc_node = np.unique(md1.mesh.elements[spc_elem, :]) - 1
        flag = np.ones(md1.mesh.numberofvertices)
        flag[spc_node] = 0
        pos = np.nonzero(
            np.logical_not(np.sum(flag[md1.mesh.elements - 1], axis=1)))[0]
        flag_elem[pos] = 0

        #extracted elements and nodes lists
        pos_elem = np.nonzero(flag_elem)[0]
        pos_node = np.unique(md1.mesh.elements[pos_elem, :]) - 1

        #keep track of some fields
        numberofvertices1 = md1.mesh.numberofvertices
        numberofelements1 = md1.mesh.numberofelements
        numberofvertices2 = np.size(pos_node)
        numberofelements2 = np.size(pos_elem)
        flag_node = np.zeros(numberofvertices1)
        flag_node[pos_node] = 1

        #Create Pelem and Pnode (transform old nodes in new nodes and same thing for the elements)
        Pelem = np.zeros(numberofelements1, int)
        Pelem[pos_elem] = np.arange(1, numberofelements2 + 1)
        Pnode = np.zeros(numberofvertices1, int)
        Pnode[pos_node] = np.arange(1, numberofvertices2 + 1)

        #renumber the elements (some node won't exist anymore)
        elements_1 = copy.deepcopy(md1.mesh.elements)
        elements_2 = elements_1[pos_elem, :]
        elements_2[:, 0] = Pnode[elements_2[:, 0] - 1]
        elements_2[:, 1] = Pnode[elements_2[:, 1] - 1]
        elements_2[:, 2] = Pnode[elements_2[:, 2] - 1]
        if md1.mesh.__class__.__name__ == 'mesh3dprisms':
            elements_2[:, 3] = Pnode[elements_2[:, 3] - 1]
            elements_2[:, 4] = Pnode[elements_2[:, 4] - 1]
            elements_2[:, 5] = Pnode[elements_2[:, 5] - 1]

        #OK, now create the new model!

        #take every field from model
        md2 = copy.deepcopy(md1)

        #automatically modify fields

        #loop over model fields
        model_fields = vars(md1)
        for fieldi in model_fields:
            #get field
            field = getattr(md1, fieldi)
            fieldsize = np.shape(field)
            if hasattr(field, '__dict__') and not m.ismember(
                    fieldi, ['results'])[0]:  #recursive call
                object_fields = vars(field)
                for fieldj in object_fields:
                    #get field
                    field = getattr(getattr(md1, fieldi), fieldj)
                    fieldsize = np.shape(field)
                    if len(fieldsize):
                        #size = number of nodes * n
                        if fieldsize[0] == numberofvertices1:
                            setattr(getattr(md2, fieldi), fieldj,
                                    field[pos_node])
                        elif fieldsize[0] == numberofvertices1 + 1:
                            setattr(getattr(md2, fieldi), fieldj,
                                    np.vstack((field[pos_node], field[-1, :])))
                        #size = number of elements * n
                        elif fieldsize[0] == numberofelements1:
                            setattr(getattr(md2, fieldi), fieldj,
                                    field[pos_elem])
            else:
                if len(fieldsize):
                    #size = number of nodes * n
                    if fieldsize[0] == numberofvertices1:
                        setattr(md2, fieldi, field[pos_node])
                    elif fieldsize[0] == numberofvertices1 + 1:
                        setattr(md2, fieldi,
                                np.hstack((field[pos_node], field[-1, :])))
                    #size = number of elements * n
                    elif fieldsize[0] == numberofelements1:
                        setattr(md2, fieldi, field[pos_elem])

        #modify some specific fields

        #Mesh
        md2.mesh.numberofelements = numberofelements2
        md2.mesh.numberofvertices = numberofvertices2
        md2.mesh.elements = elements_2

        #mesh.uppervertex mesh.lowervertex
        if md1.mesh.__class__.__name__ == 'mesh3dprisms':
            md2.mesh.uppervertex = md1.mesh.uppervertex[pos_node]
            pos = np.where(~np.isnan(md2.mesh.uppervertex))[0]
            md2.mesh.uppervertex[pos] = Pnode[
                md2.mesh.uppervertex[pos].astype(int) - 1]

            md2.mesh.lowervertex = md1.mesh.lowervertex[pos_node]
            pos = np.where(~np.isnan(md2.mesh.lowervertex))[0]
            md2.mesh.lowervertex[pos] = Pnode[
                md2.mesh.lowervertex[pos].astype(int) - 1]

            md2.mesh.upperelements = md1.mesh.upperelements[pos_elem]
            pos = np.where(~np.isnan(md2.mesh.upperelements))[0]
            md2.mesh.upperelements[pos] = Pelem[
                md2.mesh.upperelements[pos].astype(int) - 1]

            md2.mesh.lowerelements = md1.mesh.lowerelements[pos_elem]
            pos = np.where(~np.isnan(md2.mesh.lowerelements))[0]
            md2.mesh.lowerelements[pos] = Pelem[
                md2.mesh.lowerelements[pos].astype(int) - 1]

        #Initial 2d mesh
        if md1.mesh.__class__.__name__ == 'mesh3dprisms':
            flag_elem_2d = flag_elem[np.arange(0, md1.mesh.numberofelements2d)]
            pos_elem_2d = np.nonzero(flag_elem_2d)[0]
            flag_node_2d = flag_node[np.arange(0, md1.mesh.numberofvertices2d)]
            pos_node_2d = np.nonzero(flag_node_2d)[0]

            md2.mesh.numberofelements2d = np.size(pos_elem_2d)
            md2.mesh.numberofvertices2d = np.size(pos_node_2d)
            md2.mesh.elements2d = md1.mesh.elements2d[pos_elem_2d, :]
            md2.mesh.elements2d[:, 0] = Pnode[md2.mesh.elements2d[:, 0] - 1]
            md2.mesh.elements2d[:, 1] = Pnode[md2.mesh.elements2d[:, 1] - 1]
            md2.mesh.elements2d[:, 2] = Pnode[md2.mesh.elements2d[:, 2] - 1]

            md2.mesh.x2d = md1.mesh.x[pos_node_2d]
            md2.mesh.y2d = md1.mesh.y[pos_node_2d]

        #Edges
        if m.strcmp(md.mesh.domaintype(), '2Dhorizontal'):
            if np.ndim(md2.mesh.edges) > 1 and np.size(
                    md2.mesh.edges, axis=1
            ) > 1:  #do not use ~isnan because there are some np.nans...
                #renumber first two columns
                pos = np.nonzero(md2.mesh.edges[:, 3] != -1)[0]
                md2.mesh.edges[:, 0] = Pnode[md2.mesh.edges[:, 0] - 1]
                md2.mesh.edges[:, 1] = Pnode[md2.mesh.edges[:, 1] - 1]
                md2.mesh.edges[:, 2] = Pelem[md2.mesh.edges[:, 2] - 1]
                md2.mesh.edges[pos, 3] = Pelem[md2.mesh.edges[pos, 3] - 1]
                #remove edges when the 2 vertices are not in the domain.
                md2.mesh.edges = md2.mesh.edges[np.nonzero(
                    np.logical_and(md2.mesh.edges[:, 0], md2.mesh.edges[:, 1])
                )[0], :]
                #Replace all zeros by -1 in the last two columns
                pos = np.nonzero(md2.mesh.edges[:, 2] == 0)[0]
                md2.mesh.edges[pos, 2] = -1
                pos = np.nonzero(md2.mesh.edges[:, 3] == 0)[0]
                md2.mesh.edges[pos, 3] = -1
                #Invert -1 on the third column with last column (Also invert first two columns!!)
                pos = np.nonzero(md2.mesh.edges[:, 2] == -1)[0]
                md2.mesh.edges[pos, 2] = md2.mesh.edges[pos, 3]
                md2.mesh.edges[pos, 3] = -1
                values = md2.mesh.edges[pos, 1]
                md2.mesh.edges[pos, 1] = md2.mesh.edges[pos, 0]
                md2.mesh.edges[pos, 0] = values
                #Finally remove edges that do not belong to any element
                pos = np.nonzero(
                    np.logical_and(md2.mesh.edges[:, 1] == -1,
                                   md2.mesh.edges[:, 2] == -1))[0]
                md2.mesh.edges = np.delete(md2.mesh.edges, pos, axis=0)

        #Penalties
        if np.any(np.logical_not(np.isnan(md2.stressbalance.vertex_pairing))):
            for i in xrange(np.size(md1.stressbalance.vertex_pairing, axis=0)):
                md2.stressbalance.vertex_pairing[i, :] = Pnode[
                    md1.stressbalance.vertex_pairing[i, :]]
            md2.stressbalance.vertex_pairing = md2.stressbalance.vertex_pairing[
                np.nonzero(md2.stressbalance.vertex_pairing[:, 0])[0], :]
        if np.any(np.logical_not(np.isnan(md2.masstransport.vertex_pairing))):
            for i in xrange(np.size(md1.masstransport.vertex_pairing, axis=0)):
                md2.masstransport.vertex_pairing[i, :] = Pnode[
                    md1.masstransport.vertex_pairing[i, :]]
            md2.masstransport.vertex_pairing = md2.masstransport.vertex_pairing[
                np.nonzero(md2.masstransport.vertex_pairing[:, 0])[0], :]

        #recreate segments
        if md1.mesh.__class__.__name__ == 'mesh2d':
            md2.mesh.vertexconnectivity = NodeConnectivity(
                md2.mesh.elements, md2.mesh.numberofvertices)[0]
            md2.mesh.elementconnectivity = ElementConnectivity(
                md2.mesh.elements, md2.mesh.vertexconnectivity)[0]
            md2.mesh.segments = contourenvelope(md2)
            md2.mesh.vertexonboundary = np.zeros(numberofvertices2, bool)
            md2.mesh.vertexonboundary[md2.mesh.segments[:, 0:2] - 1] = True
        else:
            #First do the connectivity for the contourenvelope in 2d
            md2.mesh.vertexconnectivity = NodeConnectivity(
                md2.mesh.elements2d, md2.mesh.numberofvertices2d)[0]
            md2.mesh.elementconnectivity = ElementConnectivity(
                md2.mesh.elements2d, md2.mesh.vertexconnectivity)[0]
            segments = contourenvelope(md2)
            md2.mesh.vertexonboundary = np.zeros(
                numberofvertices2 / md2.mesh.numberoflayers, bool)
            md2.mesh.vertexonboundary[segments[:, 0:2] - 1] = True
            md2.mesh.vertexonboundary = np.tile(md2.mesh.vertexonboundary,
                                                md2.mesh.numberoflayers)
            #Then do it for 3d as usual
            md2.mesh.vertexconnectivity = NodeConnectivity(
                md2.mesh.elements, md2.mesh.numberofvertices)[0]
            md2.mesh.elementconnectivity = ElementConnectivity(
                md2.mesh.elements, md2.mesh.vertexconnectivity)[0]

        #Boundary conditions: Dirichlets on new boundary
        #Catch the elements that have not been extracted
        orphans_elem = np.nonzero(np.logical_not(flag_elem))[0]
        orphans_node = np.unique(md1.mesh.elements[orphans_elem, :]) - 1
        #Figure out which node are on the boundary between md2 and md1
        nodestoflag1 = np.intersect1d(orphans_node, pos_node)
        nodestoflag2 = Pnode[nodestoflag1].astype(int) - 1
        if np.size(md1.stressbalance.spcvx) > 1 and np.size(
                md1.stressbalance.spcvy) > 2 and np.size(
                    md1.stressbalance.spcvz) > 2:
            if np.size(md1.inversion.vx_obs) > 1 and np.size(
                    md1.inversion.vy_obs) > 1:
                md2.stressbalance.spcvx[nodestoflag2] = md2.inversion.vx_obs[
                    nodestoflag2]
                md2.stressbalance.spcvy[nodestoflag2] = md2.inversion.vy_obs[
                    nodestoflag2]
            else:
                md2.stressbalance.spcvx[nodestoflag2] = np.nan
                md2.stressbalance.spcvy[nodestoflag2] = np.nan
                print "\n!! extract warning: spc values should be checked !!\n\n"
            #put 0 for vz
            md2.stressbalance.spcvz[nodestoflag2] = 0
        if np.any(np.logical_not(np.isnan(md1.thermal.spctemperature))):
            md2.thermal.spctemperature[nodestoflag2] = 1

        #Results fields
        if md1.results:
            md2.results = results()
            for solutionfield, field in md1.results.__dict__.iteritems():
                if isinstance(field, list):
                    setattr(md2.results, solutionfield, [])
                    #get time step
                    for i, fieldi in enumerate(field):
                        if isinstance(fieldi, results) and fieldi:
                            getattr(md2.results,
                                    solutionfield).append(results())
                            fieldr = getattr(md2.results, solutionfield)[i]
                            #get subfields
                            for solutionsubfield, subfield in fieldi.__dict__.iteritems(
                            ):
                                if np.size(subfield) == numberofvertices1:
                                    setattr(fieldr, solutionsubfield,
                                            subfield[pos_node])
                                elif np.size(subfield) == numberofelements1:
                                    setattr(fieldr, solutionsubfield,
                                            subfield[pos_elem])
                                else:
                                    setattr(fieldr, solutionsubfield, subfield)
                        else:
                            getattr(md2.results, solutionfield).append(None)
                elif isinstance(field, results):
                    setattr(md2.results, solutionfield, results())
                    if isinstance(field, results) and field:
                        fieldr = getattr(md2.results, solutionfield)
                        #get subfields
                        for solutionsubfield, subfield in field.__dict__.iteritems(
                        ):
                            if np.size(subfield) == numberofvertices1:
                                setattr(fieldr, solutionsubfield,
                                        subfield[pos_node])
                            elif np.size(subfield) == numberofelements1:
                                setattr(fieldr, solutionsubfield,
                                        subfield[pos_elem])
                            else:
                                setattr(fieldr, solutionsubfield, subfield)

        #Keep track of pos_node and pos_elem
        md2.mesh.extractedvertices = pos_node + 1
        md2.mesh.extractedelements = pos_elem + 1

        return md2
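The Pelem/Pnode arrays above implement a compact renumbering trick: old 1-based ids map through a lookup array whose kept entries hold the new compact ids and whose dropped entries stay 0. A standalone sketch:

import numpy as np

pos_node = np.array([0, 2, 3])          # kept vertices (0-based positions)
Pnode = np.zeros(5, int)                # 5 vertices in the old mesh
Pnode[pos_node] = np.arange(1, pos_node.size + 1)
old_elements = np.array([[1, 3, 4]])    # 1-based connectivity
new_elements = Pnode[old_elements - 1]  # renumbered connectivity
assert (new_elements == [[1, 2, 3]]).all()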