Example #1
import io

import IOOperations
import PreProc

def getDefsList( dirForFound, cbFindCond, cbGetFileList, typeInFile, ofile ):
	# Build the list of matching files
	fileList = cbGetFileList( dirForFound, typeInFile )

	# Read each file and pick the directives out of it
	f = io.open( ofile+'.h', 'w', encoding='utf-8')
	f.write(u'#-*- coding: utf-8 -*-\n')
	definedList = []  # all found names are collected here
	for at in fileList:
		# file contents
		string = IOOperations.getFileContent( at )

		# strip the commented-out parts
		strPure = PreProc.delCom( string )

		# split into lines
		stringSplit = strPure.split('\n')
		for item in stringSplit:
			if cbFindCond( item ):
				f.write(item+'\n')
				newItem = item.split()[1]
				definedList.append( newItem )
	f.close()

	# save the search results as a Python list
	f = io.open( ofile+'.py', 'w', encoding='utf-8')
	f.write(u'#-*- coding: utf-8 -*-\n')
	f.write( ofile.split('/')[-1]+'List'+u' = [\n' )
	for at in definedList:
		f.write(u'\''+at+u'\','+u'\n')
	f.write(u']\n')
	f.close()
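
A call to getDefsList might look like the sketch below; the callbacks, file extensions, and output name here are hypothetical and only illustrate the expected callback signatures, they are not part of the original example.

# Minimal usage sketch (hypothetical callbacks and paths, for illustration only)
import os

def findDefine( line ):
	# assumed condition: keep lines that start with a #define directive
	return line.strip().startswith('#define')

def listFilesByType( dirs, typeInFile ):
	# assumed helper: collect files with the given extension from the given directories
	found = []
	for d in dirs:
		for name in os.listdir(d):
			if name.endswith(typeInFile):
				found.append(os.path.join(d, name))
	return found

getDefsList( ['../headers/'], findDefine, listFilesByType, '.h', 'defines' )
# writes defines.h with the matched lines and defines.py with definesList = [...]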
Example #2
ice_obs_file = '/glade/p/work/mickelso/PyAvg-IceDiag-obs/gx1v6_grid.nc'
reg_file = '/glade/p/work/mickelso/PyAvg-IceDiag-obs/REGION_MASK.nc'
year0 = 1
year1 = 10
ncl_location = '/glade/scratch/mickelso/pyAverager_trunk/trunk/pyaverager'

#### End user modify ####

pyAveSpecifier = specification.create_specifier(in_directory=in_dir,
                                                out_directory=out_dir,
                                                prefix=pref,
                                                suffix=suffix,
                                                date_pattern=date_pattern,
                                                hist_type=htype,
                                                avg_list=average,
                                                weighted=wght,
                                                split=spl,
                                                split_files=split_fn,
                                                split_orig_size=split_size,
                                                ncformat=ncfrmt,
                                                serial=serial,
                                                ice_obs_file=ice_obs_file,
                                                reg_file=reg_file,
                                                year0=year0,
                                                year1=year1,
                                                clobber=clobber,
                                                ncl_location=ncl_location)

PreProc.run_pre_proc(pyAveSpecifier)
PyAverager.run_pyAverager(pyAveSpecifier)
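
The example assumes the remaining inputs (in_dir, out_dir, pref, suffix, date_pattern, htype, average, wght, spl, split_fn, split_size, ncfrmt, serial, clobber) are set in the user-modify block above. Purely for illustration, hypothetical values could look like the following; consult the pyAverager documentation for the exact option syntax.

# Hypothetical user-modify block (illustrative values, not from the original script)
in_dir = '/path/to/history/files'   # directory with the model history files
out_dir = '/path/to/averages'       # where the computed averages are written
pref = 'case.cice.h'                # history file name prefix
suffix = 'nc'                       # history file name suffix
date_pattern = 'yyyy-mm'            # date pattern used in the file names
htype = 'slice'                     # history file type ('slice' or 'series')
average = ['ya:1', 'tavg:1:10']     # averages to compute (pyAverager avg_list syntax)
wght = False                        # do not weight by days per month
spl = False                         # input files are not split across variables
split_fn = ''                       # only used when spl is True
split_size = ''                     # only used when spl is True
ncfrmt = 'netcdf'                   # output NetCDF format
serial = False                      # run in parallel
clobber = True                      # overwrite existing output files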
Example #3
reg_file = '/glade/p/work/mickelso/PyAvg-IceDiag-obs/REGION_MASK.nc'
year0 = 1
year1 = 10
ncl_location = '/glade/scratch/mickelso/pyAverager_trunk/trunk/pyaverager'

#### End user modify ####

pyAveSpecifier = specification.create_specifier(in_directory=in_dir,
                                                out_directory=out_dir,
                                                prefix=pref,
                                                suffix=suffix,
                                                date_pattern=date_pattern,
                                                hist_type=htype,
                                                avg_list=average,
                                                weighted=wght,
                                                split=spl,
                                                split_files=split_fn,
                                                split_orig_size=split_size,
                                                ncformat=ncfrmt,
                                                serial=serial,
                                                ice_obs_file=ice_obs_file,
                                                reg_file=reg_file,
                                                year0=year0,
                                                year1=year1,
                                                clobber=clobber,
                                                ncl_location=ncl_location)

PreProc.run_pre_proc(pyAveSpecifier)
PyAverager.run_pyAverager(pyAveSpecifier)

Example #4
import io
import os

import IOOperations

dirForFound = ["../headers/"]

# search the source directories and keep only the .inc include files
fileList = list()
for dir in dirForFound:
	for p in os.listdir( dir ):
		if p.find('.inc') != -1:
			fileList.append( dir + p )

import PreProc 
PreProc.getMacroFile( fileList )
import mFu

# Process the source file
ifile = '../src/_v1_IRQ.asm'
# read it
string = IOOperations.getFileContent( ifile )
commFree = PreProc.delCom( string )

commFreeList = commFree.split('\n')

# replace macros in the file
i = 0
f = io.open('mFu.asm', 'w', encoding='utf-8')
f.write(u'#-*- coding: utf-8 -*-\n')
for item in commFreeList:
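	# NOTE: the listing cuts the original example off inside this loop.
	# The lines below are a hypothetical continuation, not the original code:
	# they assume mFu exposes a list of macro names (e.g. mFu.mFuList) and
	# simply tag lines that reference a macro before writing them out.
	newLine = item
	for macro in mFu.mFuList:
		if macro in newLine:
			i += 1
			newLine += '  ; uses macro ' + macro
	f.write(newLine + '\n')
f.close()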
Example #5
# =============================================================================
# Preprocessing
# =============================================================================

# Import data
data_source = 'git'
market = 'AEX'
stocks = get_data(data_source, market)

# ONLY FOR NOW, SHOULD BE CHANGED!!
df = stocks['PHIA']

# Preprocessing data
split_datapoint = 5000
smoothing_window_size = 1000

pp_data = PreProc(df)
pp_data.splitdata(split_datapoint)
pp_data.normalize_smooth(smoothing_window_size, EMA=0.0, gamma=0.1)
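
PreProc, get_data and their methods are defined elsewhere and are not part of this snippet. A minimal sketch of what splitdata and normalize_smooth could do (window-wise min-max scaling followed by exponential smoothing) is shown below; the class is an assumption for illustration, not the original implementation.

# Hypothetical stand-in for the PreProc class used above (illustrative only)
import numpy as np

class PreProcSketch:
    def __init__(self, df):
        # keep the mid price as a 1-D array (assumes 'High' and 'Low' columns)
        self.prices = ((df['High'] + df['Low']) / 2.0).values

    def splitdata(self, split_datapoint):
        # first part for training, the rest for testing
        self.train = self.prices[:split_datapoint].astype(float)
        self.test = self.prices[split_datapoint:].astype(float)

    def normalize_smooth(self, window, EMA=0.0, gamma=0.1):
        # min-max scale the training data one window at a time
        for start in range(0, len(self.train), window):
            chunk = self.train[start:start + window]
            lo, hi = chunk.min(), chunk.max()
            self.train[start:start + window] = (chunk - lo) / (hi - lo + 1e-8)
        # exponential smoothing of the scaled training data
        for i in range(len(self.train)):
            EMA = gamma * self.train[i] + (1 - gamma) * EMA
            self.train[i] = EMA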

# =============================================================================
# Define and apply LSTM
# =============================================================================

# Define hyperparameters
D = 1                           # Dimensionality of the data. Since our data is 1-D this would be 1
num_unrollings = 50             # Number of time steps you look into the future.
batch_size = 500                # Number of samples in a batch
num_nodes = [200, 200, 150]     # Number of hidden nodes in each layer of the deep LSTM stack we're using
n_layers = len(num_nodes)       # number of layers
dropout = 0.2                   # Dropout amount
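
The model definition itself is not included in this excerpt. As one possible illustration (a Keras-style stack; the original may build the graph differently), the hyperparameters above could drive a three-layer LSTM like this:

# Hypothetical model built from the hyperparameters above (illustration only)
import tensorflow as tf
from tensorflow.keras import layers

model = tf.keras.Sequential()
for li, units in enumerate(num_nodes):
    return_seq = li < n_layers - 1          # only the last LSTM collapses the sequence
    if li == 0:
        model.add(layers.LSTM(units, return_sequences=return_seq,
                              input_shape=(num_unrollings, D)))
    else:
        model.add(layers.LSTM(units, return_sequences=return_seq))
    model.add(layers.Dropout(dropout))
model.add(layers.Dense(1))                  # one output: the next (normalized) value
model.compile(optimizer='adam', loss='mse')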