def print_rf_dat(rf, name='RF'):
    """Write a random field to an ASCII ``<name>.dat`` file.

    The file holds a META_START/META_STOP header with the field name and
    the nested 'sizes', 'parameters' and 'nodes' metadata dictionaries,
    followed by a VALUES_START/VALUES_STOP section listing the values in
    rf[1], one per line.

    Args:
        rf: dict optionally containing nested dicts under the keys
            'sizes', 'parameters', 'nodes', and the realization values
            under the integer key 1.
        name: base name of the output file (without the '.dat' suffix).
    """
    h.hprint('Write output file: {}.dat'.format(name))
    out_path = '{}.dat'.format(name)  # renamed: 'file' shadowed a builtin
    with open(out_path, 'w') as f:
        # Metadata header section.
        f.write('META_START\n')  # fixed: pointless .format(name) on a placeholder-free literal
        f.write('name:{}\n'.format(name))
        for l in rf:
            if l in ['sizes', 'parameters', 'nodes']:
                for k, v in rf[l].items():
                    f.write('{l}:{k}:{v}\n'.format(l=l, k=k, v=v))
        f.write('META_STOP\n')  # fixed: pointless .format(name) removed here too
        f.write('\n')
        # Realization values, one per line.
        f.write('VALUES_START\n')
        for r in rf[1]:
            f.write('{}\n'.format(r))
        f.write('VALUES_STOP\n')
def simu_rf(rf):
    """Simulate a correlated Gaussian random field via R's RandomFields.

    Builds an RMgauss or RMmatern covariance model (plus nugget and
    trend) from the attributes of *rf*, samples it on a regular 3-D grid
    of n nodes per axis spanning [0, l], and returns the realization.

    Args:
        rf: object with attributes l and n (dicts keyed by 'x', 'y', 'z'),
            type ('gauss' or 'matern'), and the covariance parameters
            referenced in the model formula (var/nu, scale, nugget_var,
            mean).

    Returns:
        dict mapping 1 to the simulated values as a numpy array.

    Raises:
        ValueError: if rf.type is neither 'gauss' nor 'matern'.
    """
    l = rf.l
    n = rf.n
    h.hprint('Correlated Gaussian Random Field')
    for k, v in rf.__dict__.items():
        h.hprint('{k}={v}'.format(k=k, v=v), n=2)
    importr("RandomFields")
    RFsimulate = r('RFsimulate')
    options = r('RFoptions(seed=NA, spConform=FALSE)')
    # Fixed: identity comparison ('is') on string literals is
    # implementation-dependent; use equality instead.
    if rf.type == "gauss":
        model = r('RMgauss(var={var}, scale={scale}) + RMnugget(var={nugget_var}) + RMtrend(mean={mean})'.format(**rf.__dict__))
    elif rf.type == "matern":
        model = r('RMmatern(nu={nu}, scale={scale}) + RMnugget(var={nugget_var}) + RMtrend(mean={mean})'.format(**rf.__dict__))
    else:
        # Fixed: an unknown type previously fell through and raised a
        # confusing NameError on 'model' below.
        raise ValueError('Unknown random field type: {}'.format(rf.type))
    # Regular grid coordinates along each axis: n nodes over [0, l].
    x = r('seq(0.0, {}, {})'.format(l['x'], float(l['x']) / (n['x'] - 1)))
    y = r('seq(0.0, {}, {})'.format(l['y'], float(l['y']) / (n['y'] - 1)))
    z = r('seq(0.0, {}, {})'.format(l['z'], float(l['z']) / (n['z'] - 1)))
    grid = r('NULL')
    return {1: np.array(RFsimulate(model=model, x=x, y=y, z=z, grid=grid))}
def print_rf_vtk(rf, rea):
    """Write realization *rea* of field *rf* as a legacy ASCII VTK file.

    Produces a STRUCTURED_GRID dataset: point coordinates taken from the
    lexicographic mesh returned by create_lexico(rf), followed by the
    scalar values of rea[1] flattened in Fortran (column-major) order.

    Args:
        rf: random-field object with attributes name and n (dict of node
            counts keyed by 'x', 'y', 'z').
        rea: dict mapping 1 to a numpy array of realization values.
    """
    c = create_lexico(rf)
    h.hprint('Write output file: {}.vtk'.format(rf.name))
    out_path = '{}.vtk'.format(rf.name)  # renamed: 'file' shadowed a builtin
    # Hoisted: total node count was recomputed for POINTS and POINT_DATA.
    n_points = np.prod([n for n in rf.n.values()])
    with open(out_path, 'w') as f:
        f.write('# vtk DataFile Version 2.0\n')
        f.write('{}\n'.format(rf.name))
        f.write('ASCII\nDATASET STRUCTURED_GRID\n')
        f.write('DIMENSIONS {x} {y} {z}\n\n'.format(**rf.n))
        f.write('POINTS {} float\n'.format(n_points))
        for x, y, z in zip(c['x'], c['y'], c['z']):
            f.write('{x} {y} {z}\n'.format(x=x, y=y, z=z))
        f.write('\n')
        f.write('POINT_DATA {}\n'.format(n_points))
        f.write('SCALARS {} float 1\n'.format(rf.name))
        f.write('LOOKUP_TABLE default\n')
        # 'F' order matches the VTK structured-grid convention (x fastest).
        for r in rea[1].flatten(order='F'):
            f.write('{}\n'.format(r))
        f.write('\n')
def read_rf_dat(f, create_mesh=False):
    """Read a random field back from a '<f>.dat' file.

    Parses the META_START/META_STOP section into flat (key:value) and
    nested (group:key:value) string entries, and the
    VALUES_START/VALUES_STOP section into a numpy float array stored
    under the integer key 1.

    Args:
        f: base name of the input file (without the '.dat' suffix).
        create_mesh: when True and the metadata holds both 'sizes' and
            'nodes', also build the lexicographic mesh via create_lexico.

    Returns:
        dict of metadata plus {1: values}, or None when a section marker
        is missing from the file.
    """
    first_line_printed = '--- Read file: {} ---'.format(f)
    print('\n{}'.format(first_line_printed))
    h.hprint('Read file')
    # Keep only non-blank lines, stripped of surrounding whitespace; the
    # 'with' block closes the file before parsing starts.
    with open('{}.dat'.format(f)) as content_file:
        content = [l.strip() for l in content_file if l.strip()]
    h.hprint('Get Metadatas')
    try:
        meta_start = content.index('META_START')
        meta_stop = content.index('META_STOP')
    except ValueError as e:
        h.hprint('Oups... Could not read meta datas.', n=2)
        h.hprint('{}'.format(e), n=3)
        return None
    rf = dict()
    for m in content[meta_start + 1:meta_stop]:
        sp = m.split(':')
        if len(sp) == 2:
            # Flat key:value entry; the first occurrence wins.
            if sp[0] in rf:
                h.hprint('couple {}:{} not considered (redondant data)'.format(*sp), n=3)
            else:
                rf.update({sp[0]: sp[1]})
        elif len(sp) == 3:
            # Nested group:key:value entry, merged into a sub-dict.
            if sp[0] in rf:
                rf[sp[0]].update({sp[1]: sp[2]})
            else:
                rf.update({sp[0]: {sp[1]: sp[2]}})
        else:
            h.hprint('parameter {m} not considered (to deep)'.format(m=m), n=3)
    h.hprint('Get Values')
    try:
        values_start = content.index('VALUES_START')
        values_stop = content.index('VALUES_STOP')
    except ValueError as e:
        h.hprint('Oups... Could not read values.', n=2)
        h.hprint('{}'.format(e), n=3)
        return None
    rf.update({1: np.array([float(m) for m in content[values_start + 1:values_stop]])})
    if create_mesh:
        if 'sizes' in rf and 'nodes' in rf:
            create_lexico(rf['sizes'], rf['nodes'])
    # Fixed: Python-2-only print statement -> call form, consistent with
    # the print(...) used above and valid on both Python 2 and 3.
    print('-' * len(first_line_printed) + '\n')
    return rf