def main(_):
    hps = param()
    path_list = [hps.save_path, hps.img_save_path, hps.log_path]
    keys = ['run_name', 'epoch', 'batch_size', 'z_dim', 'lr']
    for k in keys:
        v = FLAGS[k].value
        if v is not None:
            print('{} value will be changed to: {}'.format(k, v))
            setattr(hps, k, v)
            if k == 'run_name':
                path_list.append(os.path.join(hps.log_path, v))
                path_list.append(os.path.join(hps.save_path, v))
                path_list.append(os.path.join(hps.img_save_path, v))
    make_path(path_list)

    train_data, train_size, _, _, test_data, test_labels = mnist_data.prepare_MNIST_data()
    train_img = train_data[:, :-mnist_data.NUM_LABELS]
    train_label = train_data[:, -mnist_data.NUM_LABELS:]
    print("INFO: Loaded MNIST data, shape: {}".format(train_data.shape))

    with tf.Session() as sess:
        model = AdversarialAutoEncoder((train_img, train_label), train_size, hps, sess)
        tf.logging.info("Start Training")
        trainer = Trainer((train_img, train_label), train_size, hps, sess, model)
        trainer.learn()
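# A minimal sketch of the flag definitions main() assumes, based only on the keys it reads via
# FLAGS[k].value. The defaults and help strings below are hypothetical; absl.flags (which also
# backs tf.app.flags in TF 1.x) is assumed as the flag library.
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('run_name', None, 'Subdirectory name for logs, checkpoints and images.')
flags.DEFINE_integer('epoch', None, 'Override the number of training epochs.')
flags.DEFINE_integer('batch_size', None, 'Override the training batch size.')
flags.DEFINE_integer('z_dim', None, 'Override the latent dimension.')
flags.DEFINE_float('lr', None, 'Override the learning rate.')

if __name__ == '__main__':
    app.run(main)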
def crossPGA(parentA, parentB, children=2):
    # Creating child param
    childParam = param.param()
    # Locking abundance ratios
    if lockTiCr:
        childParam.comp.lockTiCr = True
    if lockScTi:
        childParam.comp.lockScTi = True
    if lockVCr:
        childParam.comp.lockVCr = True
    # Initialising mutation counter
    childParam.mutations = 0
    selParameters = selElements + ['lum', 'vph']
    selParamLen = len(selParameters)
    # Choosing the crossover point (cast to int so it can be used as a slice index)
    crossOverPoint = int(random.uniform(1, selParamLen - 1))
    paramChoice = random.permutation(selParameters)
    # Crossover: selecting from both parents
    for paramName in paramChoice[:crossOverPoint]:
        childParam[paramName] = parentA[paramName]
    for paramName in paramChoice[crossOverPoint:]:
        childParam[paramName] = parentB[paramName]
    childParam = mutateUniform(childParam)
    childParam.comp.resetOxygen()
    if childParam['O'] < 0:
        raise dalekExceptions.geneticException('Crossover: Child has negative oxygen')
    return childParam
def read_hp_values(file_location):
    # first line of the info file is the folder name; the remaining lines are 'hp' records
    f = open(constants.INFO_FOLDER + file_location)
    hp_values = param.param()
    folder_name = f.readline().strip()
    for line in f.readlines():
        s = line.strip().split(',')
        if s[0] == 'hp':
            to_add = []
            the_type = s[2]
            the_name = s[1]
            for i in range(3, len(s)):
                if the_type == 'f':
                    to_add.append(float(s[i]))
                elif the_type == 'i':
                    to_add.append(int(s[i]))
                elif the_type == 's':
                    to_add.append(s[i])
            hp_values.set_param(the_name, to_add)
    return hp_values
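# A small usage sketch of read_hp_values, assuming an info file laid out the way the parser above
# expects: first line a folder name, then comma-separated 'hp,<name>,<type>,<values...>' records.
# The file name, hyperparameter names and values below are hypothetical.
#
# Hypothetical contents of 'example_info':
#   my_run_folder
#   hp,reg,f,0.1,1.0,10.0
#   hp,nfld,i,2
hp_values = read_hp_values('example_info')
print(hp_values.get_param('reg'))   # [0.1, 1.0, 10.0]
print(hp_values.get_param('nfld'))  # [2]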
def getFicaModel(conn, modelID, origSpecID=None):
    curs = conn.cursor()
    # retrieving origSpec
    if origSpecID is not None:
        origSpec = curs.execute('select spectrum from sn_spectra where id=%s' % origSpecID)
    # Retrieving the model
    (machineName, execTime, wFactor, errorString,
     abundanceID, dicaID, lumVphID, spectrumID) = curs.execute(
        'select MACHINE, TIME, W, ERROR, '
        'ABUNDANCE_ID, DICA_ID, LUMVPH_ID, SPECTRUM_ID '
        'from FICA_MODEL where FICA_MODEL.ID=%s' % modelID).fetchall()[0]
    # getting dica params
    colNames = zip(*curs.execute('PRAGMA table_info(fica_dica)').fetchall())[1]
    colNames = map(str, colNames)
    colValues = curs.execute('select * from fica_dica where id=%s' % dicaID).fetchall()[0]
    dicaDict = dict(zip([convertFields2Dica[item] for item in colNames[1:]], colValues[1:]))
    lum, vph = curs.execute('select LUM, VPH from FICA_LUMVPH where FICA_LUMVPH.ID=%s' % lumVphID).fetchall()[0]
    dicaDict['log_lbol'] = lum
    dicaDict['v_ph'] = vph
    dica = param.dica(initDica=dicaDict, mode='fromDict')
    # getting abundances
    colNames = zip(*curs.execute('PRAGMA table_info(fica_abundance)').fetchall())[1]
    colNames = map(str, colNames)
    colValues = curs.execute('select * from fica_abundance where id=%s' % abundanceID).fetchall()[0]
    compDict = dict(zip(colNames[1:], colValues[1:]))
    comp = param.comp(initComp=compDict, t=dica['t'])
    comp._setNiDecay()
    curParam = param.param(initDica=dica, initComp=comp)
    # the original had an early 'return dica' that left everything below unreachable;
    # returning the assembled param object here instead (assumed intent)
    return curParam
def read_param(file_location):
    # first line of the info file is the folder name
    f = open(constants.INFO_FOLDER + file_location)
    the_params = param.param({})
    hp_values = param.param()
    folder_name = f.readline().strip()
    global_stuff.RESULTS_FOLDER = global_stuff.RESULTS_BASE_FOLDER + folder_name + '/'
    for line in f.readlines():
        print line
        if line[0] != '#':
            s = line.strip().split(',')
            if s[0] != 'hp':
                the_name = s[0]
                if the_name == 'n':
                    node_features = []
                    for i in range(1, len(s)):
                        node_features.append(constants.get_master_node_feature_list()[int(s[i])])
                    the_params.set_param('n', node_features)
                if the_name == 'e':
                    edge_features = []
                    for i in range(1, len(s)):
                        edge_features.append(constants.get_master_edge_feature_list()[int(s[i])])
                    the_params.set_param('e', edge_features)
                try:
                    the_type = s[1]
                    if the_type == 'f':
                        the_params.set_param(the_name, float(s[2]))
                    elif the_type == 'i':
                        the_params.set_param(the_name, int(s[2]))
                    elif the_type == 's':
                        the_params.set_param(the_name, s[2])
                except:
                    pass
    # hp values file happens to be the same as info file, so set that
    the_params.set_param('hpvf', file_location)
    if len(the_params.get_param('e')) != 0:
        assert the_params.get_param('wif') != 2
    return folder_name, the_params
def __init__(self, ctr):
    super().__init__(ctr)
    self.bspread = 20
    self.aspread = 20
    self.eblean = 0
    self.ealean = 0
    self.olean = 0  # used in b,a for option spot calculation. OptionSpot = (b+a)/2 + olean
    self.par = param.param('spread.DAS', self.on_spread)
def createOptions():
    option.pvol = param.param('option.vol', option.on_vol)
    for i, j in ledx.market.contract.contracts.items():
        try:
            if 'option' in j.msg['derivative_type'] and j.msg['active']:
                x = option(j.msg, j)
        except Exception as e:
            logging.warning(e)
    logging.info('options created ' + str(len(option.options)))
def crossArith(parentA, parentB, mutationRate=0.2):
    childParam = param.param()
    rChoice = random.permutation(selElements)
    for element in rChoice:
        childParam[element] = np.mean([parentA[element], parentB[element]])
    if childParam['O'] < 0:
        raise Exception('Crossover: Child has negative oxygen')
    if any(np.array(childParam.comp.data.values()) < 0):
        raise dalekExceptions.geneticException('Negative values: %s' % childParam.comp.data.values())
    childParam['lum'] = np.mean([parentA['lum'], parentB['lum']])
    childParam['vph'] = np.mean([parentA['vph'], parentB['vph']])
    return childParam
def set_params(options, param_file):
    ## defaults ##
    options['params']['unit'] = 'second'
    options['params']['dt'] = 162
    options['params']['hotstart'] = 2
    options['params']['rnday'] = 59
    options['params']['output dt'] = 486
    options['params']['file length'] = 44712   # 12.42 hours in seconds
    options['params']['hotstart dt'] = 44712   # in seconds
    p = param()
    p.pass_values(options['params'])
    p.write(fileout=param_file)
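# A minimal usage sketch of set_params, assuming the caller supplies an options dict with a
# 'params' sub-dict and an output path; both the empty dict and the file name are hypothetical.
options = {'params': {}}
set_params(options, 'param.out')  # defaults above are filled in, then written via param.write()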
def get_param():
    import param
    p = param.param({
        'ev': .05, 'uniprot_id': 'REVERSE', 'avg_deg': 1, 'n_cutoff': 0, 'f_cutoff': 15,
        'which_msa': 0, 'which_weight': 1, 'which_dist': 3, 'pseudo_c': 0.1, 'which_blast': 2,
        'blmax': 999999, 'which_impute': 0, 'filter_co': 0.35, 'psicov_sep': 6, 'psicov_gap': 0.5,
        'psicov_r': .001, 'psiblast_iter': 1, 'hhblits_iter': 2, 'co': 5.0,
        'which_dataset': 'hemo_stone', 'which_neighbors': 1, 'protein_list_file': 'rascalled_completed',
        'to_leon': 0, 'to_cluster': 1, 'to_rascal': 1, 'to_normd': 0, 'norm_co': 9.0, 'mut_freq': 15})
    return p
def crossSingle(parentA, parentB):
    # Creating child param
    childParam = param.param()
    # Locking abundance ratios
    if config.GAConfDict['lockTiCr']:
        childParam.comp.lockTiCr = True
    if config.GAConfDict['lockScTi']:
        childParam.comp.lockScTi = True
    if config.GAConfDict['lockVCr']:
        childParam.comp.lockVCr = True
    # Initialising mutation counter
    childParam.mutations = 0
    #selParameters = selElements + ['lum', 'vph']
    selParameters = config.GAConfDict['selParameters']
    #closedGAParameters = list(set(config.GAConfDict['openGAParametersDefault'])
    #                          - set(config.GAConfDict['openGAParameters']))
    closedGAParameters = []
    selParamLen = len(selParameters)
    # Checking crossOverProbability
    if random.random() < config.GAConfDict['crossOverProbability']:
        # cast to int so the crossover point can be used as a slice index
        crossOverPoint = int(random.uniform(1, selParamLen - 1))
        paramChoice = random.permutation(selParameters)
        # Crossover: selecting from both parents
        for paramName in paramChoice[:crossOverPoint]:
            childParam[paramName] = parentA[paramName]
        for paramName in paramChoice[crossOverPoint:]:
            childParam[paramName] = parentB[paramName]
    else:
        # No crossover, selecting parameters from parentA
        for paramName in selParameters:
            childParam[paramName] = parentA[paramName]
    if closedGAParameters != []:
        for paramName in selParameters:
            childParam[paramName] = parentA[paramName]
    childParam = mutateUniform(childParam)
    childParam.comp.resetOxygen()
    if childParam['O'] < 0:
        raise dalekExceptions.geneticException('Crossover: Child has negative oxygen')
    return childParam
def fromPath(cls, basePath=".", machineName=None, param=None, origSpec=None,
             fitFunc=None, t=None, execTime=None):
    if param is None:
        dicaData = fileio.dicafile(os.path.join(basePath, "dica.dat")).read_data()
        compData = fileio.compfile(os.path.join(basePath, "comp.ind")).read_data()
        dica = paramMod.dica(dicaData, mode="fromPath", t=t)
        comp = paramMod.comp(compData, t=dica["t"])
        param = paramMod.param(initDica=dica, initComp=comp)
    aSpecPath = os.path.join(basePath, "spct.dat")
    try:
        aSpec = spectrum(aSpecPath, usecols=(0, 2))
        sbib = fileio.sbibfile(os.path.join(basePath, "sbib.dat")).read_data()
        llist = sbib["llist"]
        wParams = fileio.ststfile(os.path.join(basePath, "stst.dat")).getWParams()
        specFlag = 0
    except:
        print "Creating fake Spectrum @%s" % basePath
        aSpec = spectrum(zip(np.linspace(2000, 20000, 20), range(1, 21)))
        sbib = {"llist": []}
        llist = sbib["llist"]
        wParams = []
        specFlag = -1
    log = list(file(os.path.join(basePath, "fica.log")))
    #error = list(file(os.path.join(basePath, 'error.log')))
    if wParams != []:
        w = wParams[-1][0][-1]
    else:
        w = -1
    return cls(aSpec, param, w, machineName=None, execTime=execTime, wParam=wParams,
               error=None, ficaLog=log, llist=llist, origSpec=origSpec,
               specFlag=specFlag, fitFunc=fitFunc)
def initTriCycle(IGEElement, doPickle=True, samples=10):
    fitHist = param.fitHistory()
    lumInterval = runLumCycle(maxIter=5)
    curParam = param.param()
    curParam['lum'] = lumInterval['suggestValue']
    intervals = {'luminterval': lumInterval['interval'],
                 'vphinterval': initialize.getVphBounds(),
                 # make sure that no other metal really shoots up
                 'igeinterval': initialize.getElementBounds(IGEElement, curParam.comp)}
    #pdb.set_trace()
    lumRange = np.linspace(intervals['luminterval'][0], intervals['luminterval'][1], num=samples)
    vphRange = np.linspace(intervals['vphinterval'][0], intervals['vphinterval'][1], num=samples)
    IGERange = np.linspace(intervals['igeinterval'][0], intervals['igeinterval'][1], num=samples)
    lumMG, vphMG, IGEMG, curParamMG = launcherSteps.launchTriCycle(
        lumRange, vphRange, IGERange, IGEElement, initParam=curParam)
    if doPickle:
        pickle.dump(lumMG, file('lumMG0.pkl', 'w'))
        pickle.dump(vphMG, file('vphMG0.pkl', 'w'))
        pickle.dump(IGEMG, file('igeMG0.pkl', 'w'))
    evalTriCycle(lumMG, vphMG, IGEMG, curParamMG, curParam, intervals, fitHist, IGEElement, mode='init')
    return fitHist, curParam, intervals
def createRandomParam(randomParamFunc=None):
    # creating new param object
    lumLimits = config.GAConfDict['lumLimits']
    vphLimits = config.GAConfDict['vphLimits']
    randomParam = param.param()
    if config.GAConfDict['lockTiCr']:
        randomParam.comp.lockTiCr = True
    if config.GAConfDict['lockScTi']:
        randomParam.comp.lockScTi = True
    if config.GAConfDict['lockVCr']:
        randomParam.comp.lockVCr = True
    if randomParamFunc == createRandomLogNormalValueBestFit:
        fname = glob('*.bf.pkl')[0]
        bestFitParam = cPickle.load(file(fname))
        # Limiting luminosity and photospheric velocity near the best-fit value, commented out atm
        #randomParam['lum'] = random.uniform(bestFitParam['lum'] - 0.1, bestFitParam['lum'] + 0.1)
        # no limit for lum parameter
        #randomParam['lum'] = bestFitParam['lum']
        randomParam['lum'] = random.uniform(*lumLimits)
    else:
        randomParam['lum'] = random.uniform(*lumLimits)
    randomParam['vph'] = random.uniform(*vphLimits)
    rChoice = random.permutation(config.GAConfDict['selElements'])
    for element in rChoice:
        bounds = initialize.getElementBounds(element, randomParam.comp)
        curAbundance = randomParam[element]
        #newAbundance = random.uniform(bounds[0], bounds[1])
        newAbundance = randomParamFunc(curAbundance, element, bounds)
        if any(np.array(bounds) < 0):
            raise Exception('Negative Bounds')
        randomParam[element] = newAbundance
    randomParam.comp.resetOxygen()
    if randomParam['O'] < 0:
        pdb.set_trace()
    if randomParam[element] < 0:
        pdb.set_trace()
    if randomParam.comp.data.has_key('element'):
        raise Exception()
    #randomParam['Ca'] = 0.01
    #randomParam['vph'] = 11700
    return randomParam
def human_classify(self, record):
    import wc
    import param
    p = param.param({'pid': record.pid, 'rec_idx': record.idx})
    stored_qa = wc.get_stuff(side_effect_human_input_report_labels, p)
    import questions
    the_q = questions.urinary_incontinence
    try:
        ans = stored_qa[the_q]
    except KeyError:
        raise my_exceptions.NoFxnValueException
    else:
        if ans == 0:
            raise my_exceptions.NoFxnValueException
        else:
            if ans in [1, 2]:
                return 1
            elif ans in [3, 4]:
                return 0
            else:
                pdb.set_trace()
                raise
def __init__(self, maker, params=param({})):
    #pdb.set_trace()
    maker, params = self.before_init(maker, params)
    self.other_init(maker, params)
    self.basic_init(maker, params)
import global_stuff
import wc
import param
import objects
import sys

which_job = int(sys.argv[1])
total_jobs = int(sys.argv[2])
which_object = objects.pairwise_dist

f = open(global_stuff.protein_list_file, "r")

i = 0
for line in f:
    if i % total_jobs == which_job:
        protein_name = line.strip()
        wc.get_stuff(which_object, param.param({"uniprot_id": protein_name}), True, True, False)
    i += 1  # advance the line counter so work is sharded across jobs
from glob import glob
import os
import random

import cv2
import imageio
from tensorflow.keras import backend as K
# from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
from matplotlib import pyplot as plt
import numpy as np

import param

p = param.param()
rand = random.Random(42)
assert K.image_data_format() == 'channels_last'


def preprocess_input(imgs):
    # min-max normalise each image independently to the [0, 1] range
    batch = []
    for img in imgs:
        img = np.array(img, dtype=float)
        img -= np.amin(img)
        img /= np.amax(img)
        batch.append(img)
    batch = np.array(batch)
    return batch
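# A quick usage sketch of preprocess_input with synthetic data; the image shapes and intensity
# values below are hypothetical, chosen only to show the per-image scaling to [0, 1].
imgs = [np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8) for _ in range(2)]
batch = preprocess_input(imgs)
print(batch.shape, batch.min(), batch.max())  # (2, 32, 32, 3) 0.0 1.0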
Exps = collections.defaultdict(list)
Labels = {}
Ids = {}
for i, j in contracts.items():
    if j['derivative_type'] == 'options_contract':
        Ids[j['label']] = i
        Labels[i] = j['label']
        exp = dateutil.parser.parse(j['date_expires']).replace(tzinfo=None).date()
        Exps[exp].append(j)

NamesS = set(Ids.keys())
ExpS = set([i.isoformat() for i in Exps.keys()])

p = param.param('option.vol')

rex1 = re.compile('(\d+[\.]?\d*)')
rex2 = re.compile('([+-]?\d+[\.]?\d*),([+-]?\d+[\.]?\d*)')
rex3 = re.compile('(\d+),(\d+)')
rex1n = re.compile('([+-]?\d+[\.]?\d*)')
rexb = re.compile('True|False')
rexday = re.compile('(BTC|ETH)-Day$')
rexdayK = re.compile('(BTC|ETH)-Day-(Call|Put)-\$([0-9,]*)$')


def getSize(x):
    name = input("What's your " + x + " bsize/asize? ")
    res = rex3.match(name)
    if res:
import param
import math
import functools

# Points
P = [(-3, 0), (-1, 4), (2, 3), (4, 1)]
n = 4

param.points(*zip(*P))


# Compute the polynomial
def a(i, t):
    # Product of the Lagrange basis factors
    factors = ((t - k) / (i - k) for k in range(n) if k != i)
    return functools.reduce(lambda x, y: x * y, factors)


def L(t):
    X, Y = zip(*P)
    return \
        sum(X[i] * a(i, t) for i in range(n)), \
        sum(Y[i] * a(i, t) for i in range(n))


param.param(L, 0, n - 1, 100)
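# Because a(i, t) is the Lagrange basis on the integer nodes t = 0..n-1 (a(i, j) is 1 when i == j
# and 0 otherwise), the curve passes through the control points at those parameter values. A small
# check of that property, using only the definitions above:
for j in range(n):
    x, y = L(j)
    assert abs(x - P[j][0]) < 1e-9 and abs(y - P[j][1]) < 1e-9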
for line in f:
    name = line.strip()
    folder = global_stuff.base_folder + name + '/'
    files = os.listdir(folder)
    has_easy = False
    has_dist = False
    enough_rows = False
    for a_file in files:
        if 'easy' in a_file:
            has_easy = True
            subprocess.call(['cp', folder + a_file, folder + 'msa'])
        if 'pairwise' in a_file:
            has_dist = True
            subprocess.call(['cp', folder + a_file, folder + 'dists'])  # copy to better file name
    msa = wc.get_stuff(objects.agW, param.param({'uniprot_id': name, 'ev': evalue}), False, False, False)
    if len(msa) > 50:
        enough_rows = True
    if has_easy and has_dist and enough_rows:
        completed.append(name)

g = open(global_stuff.completed_list_file, 'w')
for name in completed:
    g.write(name + '\n')

f.close()
g.close()
pdb_folders = os.listdir(home)
f = open(pdb_list_file, 'w')
new_folders = []
for folder in pdb_folders:
    s = folder.strip().split('_')
    pdb_name = s[0]
    params = param.param({'p': pdb_name})
    while 1:
        try:
            g = wc.get_stuff(objects.fW, params, False, False, False)
        except Exception, err:
            print err
            import time
            time.sleep(20)
        else:
            break
    structure = Bio.PDB.PDBParser().get_structure(params.get_param('p'), g)
    if s[1] == '':
        letter = structure[0].child_dict.keys()[0]
    else:
`plot.show()`
'''
# __MATH MODULES__________________________
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns

# __USER MODULES__________________________
import read_table as rt
import param

param = param.param()
path = param['in']
freq_start = param['freq_start']
freq_stop = param['freq_stop']
freq_num = param['number_of_rows']
freq_index = np.linspace(freq_start, freq_stop, freq_num)  # frequency range

# __DATA__________________________
df = rt.dataframe(path, regex='20160201_00')  # when regex is empty, input is supplied via dataglob()


def plt_setting(plot_element):
    '''
    Arguments:
        plot_element: number of elements to plot (value)
import param
import math

# Points
P = [(-1, 0), (1, 4), (3, -2), (4, 3), (6, 1)]
n = 5
k = 3

param.points(*zip(*P))


# De Boor's algorithm
def S(t):
    d_x, d_y = zip(*(P + [(0, 0)]))
    a = int(t)
    for j in range(1, k + 1):
        temp_x, temp_y = [0] * (n + 1), [0] * (n + 1)
        for i in range(a - 3 + j, a + 1):
            a_ir = (t - i) / (k - j + 1)
            temp_x[i] = (1 - a_ir) * d_x[i - 1] + a_ir * d_x[i]
            temp_y[i] = (1 - a_ir) * d_y[i - 1] + a_ir * d_y[i]
        d_x, d_y = temp_x, temp_y
    return d_x[a], d_y[a]


param.param(S, 3., 5., 100)
import param

# Points
P = [(-3, 0), (-1, 4), (2, 3), (4, 1)]
n = 4

param.points(*zip(*P))


# Polynomial found in the exam
def B(t):
    return \
        -3 + 6*t + 3*t**2 - 2*t**3, \
        12*t - 15*t**2 + 4*t**3


param.param(B, 0., 1., 100)
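# A quick sanity check of the endpoints, by plain arithmetic on the polynomial above:
# B(0) = (-3, 0) = P[0] and B(1) = (-3 + 6 + 3 - 2, 12 - 15 + 4) = (4, 1) = P[3],
# so the curve starts at the first control point and ends at the last one.
assert B(0) == P[0]
assert B(1) == P[3]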
def get_param():
    import param
    p = param.param({'ev': 1e-10, 'protein_list_file': 'mf_done', 'uniprot_id': 'Q8WXA2',
                     'avg_deg': 3, 'n_cutoff': 0, 'f_cutoff': 15, 'which_msa': 1,
                     'which_weight': 1, 'which_dist': 1, 'pseudo_c': 1})
    return p
def __init__(self, maker, params=param({})):
    #pdb.set_trace()
    maker, params = self.before_init(maker, params)
    self.base_folder = global_stuff.base_folder
    self.other_init(maker, params)
    self.basic_init(maker, params)
import random

import numpy as np

random.seed(111)

EPS = 1e-6
nVariables = 10
nCassures = 60
xMin = -10 * np.arange(nVariables) - 10
xMax = +10 * np.arange(nVariables) + 10
ITERATIONS = []
nIterations = len(ITERATIONS)
CUT_RHS = np.empty(len(ITERATIONS))
CUT_COEFF = np.empty((nVariables, nIterations), dtype=object)
CUT_POINT = np.empty((nVariables, nIterations), dtype=object)

parametres = param(EPS, nVariables, nCassures, nIterations, xMin, xMax,
                   ITERATIONS, CUT_RHS, CUT_COEFF, CUT_POINT)

X = X_generator(parametres)
r = Modele(parametres).resultats()
m = Modele
final_result = cutting_plane(parametres, m, X)

# Print solution
print("x* = %s " % final_result['x'])
# Check: final_result['pb'].status : optimal = 1
from __future__ import division
import scipy.misc
import tensorflow as tf
from param import param
import numpy as np
import os

hps = param()


def load_dataset(data, label=None):
    try:
        _, _ = data.shape
    except:
        raise ValueError("Data type should be ndarray")
    if label is None:
        label = np.zeros_like(data)
    dataset = tf.data.Dataset.from_tensor_slices((data, label))
    dataset = dataset.map(parse_img)
    dataset = dataset.shuffle(100000).repeat()
    dataset = dataset.batch(hps.batch_size)
    return dataset


def parse_img(img, label):
    img = tf.cast(img, tf.float32)
class twosided(object):
    Out = {}
    market = None
    lastb = None
    lasta = None
    last_spot = None
    orders = {}
    pstatus = param.param('quoting.status', order.on_status)

    def __init__(self, ctr):
        self.contract = ctr
        self.bspread = 500.
        self.aspread = 500.
        self.espread = 0.
        self.bsize = 2
        self.asize = 2
        self.lastac = None
        self.lastbc = None
        self.lastao = order(ctr)
        self.lastbo = order(ctr)
        self.lastao.quoting = True
        self.lastbo.quoting = True
        self.ok = True
        self.option = None
        twosided.Out[ctr] = self
        twosided.orders[ctr.id] = {True: self.lastao, False: self.lastbo}  # 'is_ask': True is an offer

    @staticmethod
    async def cancel():
        for i, j in twosided.Out.items():
            await j.lastao.cancel()
            await j.lastbo.cancel()
            j.lastac = None
            j.lastbc = None

    async def canceltwo(self):
        await self.lastao.cancel()
        await self.lastbo.cancel()
        self.lastac = None
        self.lastbc = None

    def on_fill(self, msg):
        self.lastac = None
        self.lastbc = None
        if self.option is not None:
            self.option.on_fill(msg)

    async def on_spot(self, prb, pra=None):
        if pra is None:
            if prb is None:
                return
            pra = prb
        twosided.lastb = prb
        twosided.lasta = pra
        twosided.last_spot = (prb + pra) / 2.

    async def on_quote(self, prb, pra):
        self.ok = False
        prbc = order.cents(prb)
        prac = order.cents(pra + 0.25)
        if prbc != self.lastbc and prac != self.lastac:
            bf = prbc < self.lastbc if self.lastbc is not None else True
            self.lastbc = prbc
            self.lastac = prac
            if bf:
                res = await self.lastbo.send(self.bsize, prb)
                res = await self.lastao.send(-self.asize, pra)
            else:
                res = await self.lastao.send(-self.asize, pra)
                res = await self.lastbo.send(self.bsize, prb)
            self.ok = True
            return
        if prbc != self.lastbc:
            self.lastbc = prbc
            res = await self.lastbo.send(self.bsize, prb)
        elif prac != self.lastac:
            self.lastac = prac
            res = await self.lastao.send(-self.asize, pra)
        self.ok = True

    async def on_spread(self, cmd):
        logging.info('spread update ' + str(cmd))
        if 'bspread' in cmd:
            self.bspread = float(cmd['bspread'])
        if 'bsize' in cmd:
            self.bsize = int(cmd['bsize'])
        if 'aspread' in cmd:
            self.aspread = float(cmd['aspread'])
        if 'asize' in cmd:
            self.asize = int(cmd['asize'])
        self.espread = 0
import f as features
import new_new_objects as objects
import param
# import try_svm

# [features.xW, features.akW]
# the_dict = {'data_list_file':'q.pl', 'edge_feature_list':[features.xW, features.akW], 'node_feature_list':[features.ayW, features.xW, features.vW, features.uW, features.wW, features.zW], 'dist_cut_off':5, 'pdb_name':'1b6b', 'chain_letter':'B', 'reg':10, 'mfmi':100, 'wif':0, 'wfld':0, 'nfld':2, 'ns':2, 'wob':0, 'evalue':1e-10, 'pos1':1, 'pos2':2, 'wreg':1, 'trun':99, 'wclf':objects.pW, 'svmC':10}
# the_dict = {'hp':param.param(), 'd':'cw.pl', 'e':[features.xW, features.akW], 'n':[features.beW, features.bbW, features.ayW, features.xW, features.vW, features.uW, features.wW, features.zW], 'co':5, 'pdb_name':'12as', 'chain_letter':'A', 'reg':100, 'mfmi':20, 'wif':0, 'wfld':0, 'nfld':2, 'ns':2, "wob":0, 'wob2':2, 'evalue':1e-10, 'pos1':1, 'pos2':2, 'wreg':1, 'trun':99, 'wclf':objects.pW, 'svmC':10, 'lgn':9, 'lgc':5, 'pos':45, 'nwc':-1.0, 'micut':5, 'wtpr':0, 'posw':150, 'sfc':1, 'self':False, 'mx':50, 'ok':3, 'ik':2, 'md':1, 'tj':1, 'wj':0, 'hpvf':'test_hp'}
the_dict = {
    "hp": param.param(),
    "d": "cw.pl",
    "e": [],
    "n": [features.beW, features.bbW, features.ayW, features.xW,
          features.vW, features.uW, features.wW, features.zW],
    "co": 5,
    "pdb_name": "12as",
    "chain_letter": "A",
    "reg": 100,
    "mfmi": 20,
    "wif": 2,
    "wfld": 0,
    "nfld": 2,
    "ns": 2,
    "wob": 0,
    "wob2": 2,
    "evalue": 1e-10,
    "pos1": 1,
    "pos2": 2,
def fromDB(cls, conn, modelID, GARunID=None):
    curs = conn.cursor()
    # retrieving origSpec:
    if GARunID is not None:
        origSpec = curs.execute("select SN_SPECTRUM from GA_RUN where id=%s" % GARunID).fetchall()[0][0]
    else:
        origSpec = None
    # Retrieving the model
    (machineName, execTime, wFactor, errorString, ficaLog,
     abundanceID, dicaID, lumVphID, spectrumID) = curs.execute(
        "select MACHINE, TIME, W, ERROR, FICA_LOG, "
        "ABUNDANCE_ID, DICA_ID, LUMVPH_ID, SPECTRUM_ID "
        "from FICA_MODEL where FICA_MODEL.ID=%s" % modelID).fetchall()[0]
    if spectrumID == "None":
        specFlag = -1
    else:
        specFlag = 0
    # getting dica params
    colNames = zip(*curs.execute("PRAGMA table_info(fica_dica)").fetchall())[1]
    colNames = map(str, colNames)
    colValues = curs.execute("select * from fica_dica where id=%s" % dicaID).fetchall()[0]
    dicaDict = dict(zip([dalekDB.convertFields2Dica[item] for item in colNames[1:]], colValues[1:]))
    lum, vph = curs.execute("select LUM, VPH from FICA_LUMVPH where FICA_LUMVPH.ID=%s" % lumVphID).fetchall()[0]
    dicaDict["log_lbol"] = lum
    dicaDict["v_ph"] = vph
    dica = paramMod.dica(initDica=dicaDict, mode="fromDict")
    # getting abundances
    colNames = zip(*curs.execute("PRAGMA table_info(fica_abundance)").fetchall())[1]
    colNames = map(str, colNames)
    colValues = curs.execute("select * from fica_abundance where id=%s" % abundanceID).fetchall()[0]
    compDict = dict(zip(colNames[1:], colValues[1:]))
    comp = paramMod.comp(initComp=compDict, t=dica["t"])
    comp._setNiDecay()
    # getting aSpec
    if specFlag == 0:
        wl = dalekDB.createWLGrid(dicaDict["wl"] * 1e4, dicaDict["grid"] * 1e4, dicaDict["mu"])
        intens = curs.execute("select spectrum from fica_spectrum where id=%s" % abundanceID).fetchall()[0][0]
        aSpec = spectrum(wl, intens)
    elif specFlag == -1:
        aSpec = spectrum(zip(np.linspace(2000, 20000, 20), range(1, 21)))
        sbib = {"llist": []}
        llist = sbib["llist"]
        wParam = []
    # getting llist
    colValues = curs.execute(
        "select eqw, shift, rest, atom, ion, param1, param2, param3 "
        "from FICA_LLIST where model_id=%d" % modelID).fetchall()
    # checking if llist exists for the current model
    if colValues == []:
        llist = None
    else:
        colNames = zip(*curs.execute("PRAGMA table_info(fica_llist)").fetchall())[1]
        colNames = [(str(item.lower()), "|S2") if item == "ATOM" else (str(item.lower()), float)
                    for item in colNames[2:]]
        llist = np.array(colValues, dtype=colNames)
    """
    Commented out until wParams becomes important; W is saved nonetheless.
    #getting wParams
    colValues = curs.execute('select XS, VS, LOGRH, TE, TR, W '
                             'from FICA_WPARAM where FICA_WPARAM.model_id=%d' % model_id).fetchall()
    #checking if WParams exists for current model
    if colValues == []:
        llist = None
    else:
        colNames = zip(*curs.execute('PRAGMA table_info(fica_WPARAM)').fetchall())[1]
        colNames = [(item.lower(), '|S2') if item=='ATOM' else (item.lower(), float)
                    for item in colNames[2:]]
        llist = np.array(colValues, dtype=colNames)
    """
    wParam = None
    curParam = paramMod.param(initDica=dica, initComp=comp)
    return cls(aSpec, curParam, wFactor, machineName=machineName, execTime=execTime,
               wParam=wParam, error=errorString, ficaLog=ficaLog, llist=None,
               origSpec=origSpec, specFlag=specFlag)
from wrapper_decorator import dec
import wrapper
import param
import pdb

# Contains the registry of wrappers. If it is a wrapper, the constructor will obtain the current
# constructor's number; the constructor returns wrappers. It is made a wrapper even though it does
# not take advantage of pickling, because caching is still needed (the wrappers string-to-idx map
# does not store actual wrappers). To create a wrapper with parameters, pass them in via params.
class wrapper_catalog(wrapper.obj_wrapper, wrapper.indexing_wrapper):

    # since there is no maker, hackishly set it to self
    def __init__(self, maker, params):
        #pdb.set_trace()
        maker = self
        self.basic_init(maker, params)
        self.maker.set_param(params, "source_instance", self)
        self.cache = caches.object_cache_for_wrapper(maker, params)

    def is_indexed(self):
        return False

    # params contains which_wrapper, and if which_wrapper is a generic_dumper_wrapper, contains
    @dec
    def constructor(self, params, recalculate=False, to_pickle=False, to_filelize=False):
        wrapper_instance = self.get_param(params, "which_wrapper_class")(self, params)
        return wrapper_instance
        #pdb.set_trace()


wc = wrapper_catalog(None, param.param({}))
import wc
import objects
import param
import pdb

p = param.param()
A = set(wc.get_stuff(objects.PID_with_SS_info, p))
B = set(wc.get_stuff(objects.PID_with_shared_MRN, p))
C = set(wc.get_stuff(objects.PID_with_several_tumors, p))
PID_to_use = A - B - C
PID_to_MRN = wc.get_stuff(objects.PID_to_MRN_dict, p)

i = 0
lengths = []
for PID in PID_to_use:
    p.set_param('pid', PID)
    texts = wc.get_stuff(objects.raw_medical_text, p)
    lengths.append(len(texts))
    print i, PID, len(texts)
    i += 1

pdb.set_trace()
def uhgs(minSol, maxSol, omega, muelite, itDiv):
    minSol = int(minSol)
    maxSol = int(maxSol)
    instance = "X-n101-k25.dat"
    itMax = 2000
    prep = 0.5
    near = 0.2
    muclose = 0.8
    resultfile = instance + "_results.txt"
    try:
        p = param("./instances/" + instance, minSol, maxSol, omega, muelite,
                  prep, itMax, itDiv, near, muclose)
        possol = []
        while len(possol) == 0:
            possol, negsol = mc.initializepop(p)
            if len(possol) == 0:
                p.omega += 10
        mc.recomputesimilarity(negsol, p)
        mc.recomputefitness(negsol, p)
        mc.recomputesimilarity(possol, p)
        mc.recomputefitness(possol, p)
        start = time.time()
        it = 0
        itDivCount = 0
        best = copy.deepcopy(min(possol, key=utils.takecost))
        besttime = 0
        while it < p.itMax and (time.time() - start) / 60 < 30:
            args = []
            for _ in range(10):
                args.append([possol, negsol, p])
            with ProcessPoolExecutor() as executor:
                result = executor.map(mc.crossandedu, args)
                solutions = list(result)
            solutions = [item for sublist in solutions for item in sublist]
            for sol in solutions:
                if sol.feas:
                    possol.append(sol)
                else:
                    negsol.append(sol)
            it += 10
            itDivCount += 10
            if min(possol, key=utils.takecost).costo < best.costo:
                it = 0
                itDivCount = 0
                best = copy.deepcopy(min(possol, key=utils.takecost))
                besttime = time.time() - start
            if len(possol) > p.maxSol:
                mc.recomputesimilarity(possol, p)
                mc.recomputefitness(possol, p)
                del possol[p.minSol:]
                if best.costo < min(possol, key=utils.takecost).costo:
                    possol.append(best)
            if len(negsol) > p.maxSol:
                mc.recomputesimilarity(negsol, p)
                mc.recomputefitness(negsol, p)
                del negsol[p.minSol:]
            if itDivCount > p.itDiv * p.itMax:
                possol.sort(key=utils.takefitness, reverse=True)
                negsol.sort(key=utils.takefitness, reverse=True)
                del possol[int(p.minSol / 3):]
                del negsol[int(p.minSol / 3):]
                possol, negsol = mc.fillpop(possol, negsol, p)
                mc.recomputesimilarity(possol, p)
                mc.recomputefitness(possol, p)
                mc.recomputesimilarity(negsol, p)
                mc.recomputefitness(negsol, p)
                itDivCount = 0
        p.printonfile(resultfile)
        best.printonfile(resultfile)
        file = open(resultfile, 'a')
        file.write("\nBest found after ")
        file.write(str(besttime / 60))
        file.write(" min\nProgram ended in: ")
        file.write(str((time.time() - start) / 60))
        file.write(" min\n\n\n\n")
        file.close()
        return -best.costo
    except Exception:
        print("error")
        traceback.print_exc()
        return -999999999
import re, param
import gnureadline

gnureadline.parse_and_bind('tab: complete')
gnureadline.parse_and_bind('set editing-mode vi')

p = param.param('spread.DAS')

rex2 = re.compile('(\d+)@([+-]?\d+[\.]?\d*),(\d+)@([+-]?\d+[\.]?\d*)')
rex3 = re.compile('(\d+)@([+-]?\d+[\.]?\d*),(\d+)@([+-]?\d+[\.]?\d*),([+-]?\d+[\.]?\d*)')
rex1 = re.compile('(\d+)@([+-]?\d+[\.]?\d*)')
rexf = re.compile('(\d+[\.]?\d*)')

x = p.get()
if x is not None:
    olean = x['olean'] if 'olean' in x else 0

while True:
    x = p.get()
    print(x)
    if x is not None:
        olean = x['olean'] if 'olean' in x else 0
    else:
        olean = 0
    name = input("What's your spread? ")
    res = rex3.match(name)
    if res:
        bsz = int(res.groups()[0])
        bspr = float(res.groups()[1])
        asz = int(res.groups()[2])
def PID_to_MRN(pid):
    import wc, objects
    m = wc.get_stuff(objects.PID_to_MRN_dict, param.param())
    return m[pid]
import wrapper
#import f as features
import param
import pdb
import objects
import global_stuff
import my_exceptions

#pdb.set_trace()
useless = wrapper.famished_wrapper()
wc = wrapper.wrapper_catalog(useless, param.param({}))


def get_stuff(wrapper_class, params, recalculate=False, to_pickle=False,
              to_filelize=False, always_recalculate=False):
    params.set_param('which_wrapper_class', wrapper_class)
    wc_used_keys, wc_all_keys, wrapper_instance, all_keys_key_key_set = wc.constructor(params, True, False, False)
    try:
        stuff_used_keys, stuff_all_keys, stuff, stuff_all_keys_key_key_set = wrapper_instance.constructor(
            params, recalculate, to_pickle, to_filelize, always_recalculate=always_recalculate)
    except Exception, err:
        print 'ERROR when calling get_stuff with this error', err
        import traceback, sys
        for frame in traceback.extract_tb(sys.exc_info()[2]):
            fname, lineno, fn, text = frame
            print "Error in %s on line %d" % (fname, lineno)
        print sys.exc_traceback.tb_lineno
        raise my_exceptions.WCFailException
    else:
from keras.preprocessing.image import ImageDataGenerator

from models import models
from param import param

p = param()
vgg = models().vgg_net_v2()
vgg.summary()

train_data_generator = ImageDataGenerator(
    rescale=1./255,
    shear_range=.2,
    zoom_range=.2,
    horizontal_flip=True
)

train_generator = train_data_generator.flow_from_directory(
    p.train_fold,
    target_size=(150, 150),
    batch_size=p.batch_size,
    class_mode='binary'
)

test_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(
    p.test_fold,
    target_size=(150, 150),
    batch_size=p.batch_size,
    class_mode='binary'
)
def testParam(self):
    p1 = param(self.api, ht.COLUMN1)
    self.assertEqual(p1, 'one')
    p2 = param(self.api, ht.COLUMN1, row_num=2)
    self.assertEqual(p2, 'two')
import wc
import param
import objects

p = param.param({'ev': 1e-10, 'protein_list_file': 'hum_var_msa_dist_completed',
                 'uniprot_id': 'P80075', 'avg_deg': 20, 'n_cutoff': 0, 'f_cutoff': 15})
m = wc.get_stuff(objects.pairwise_dist, p, False, False, False)
def get_wrapper_instance(wrapper):
    import param
    temp = param.param()
    temp.set_param('which_wrapper_class', wrapper)
    a, b, c, d = wc.constructor(temp, True, False, False)
    return c
import wc
import param
import objects
import global_stuff
import helper
import wrapper
import sys

name = sys.argv[1]
which_msa = int(sys.argv[2])
try:
    itera = int(sys.argv[3])
except:
    pass

p = param.param({'pdb': '1JOS', 'chain': 'A', 'which_dataset': 'CBS', 'uniprot_id': name,
                 'co': 7.0, 'which_blast': 0, 'which_msa': which_msa, 'ev': .05,
                 'blmax': 999999, 'hhblits_iter': itera, 'which_neighbors': 1,
                 'protein_list_file': 'rascalled_completed', 'to_leon': 0, 'to_cluster': 1,
                 'to_rascal': 0, 'to_normd': 0, 'norm_co': 9.0, 'psiblast_iter': itera})

wc.get_stuff(wrapper.my_msa_obj_wrapper, p)
p.set_param('to_rascal', 1)
wc.get_stuff(wrapper.my_msa_obj_wrapper, p)
p.set_param('to_normd', 1)
wc.get_stuff(wrapper.my_msa_obj_wrapper, p)
import param

p = param.param('quoting.status')
p.send({'status': False})
import f as features
import new_new_objects as objects
import param

the_dict = {'data_list_file': 'e.pl',
            'edge_feature_list': [features.xW, features.akW],
            'node_feature_list': [features.beW, features.bbW, features.ayW, features.xW,
                                  features.vW, features.uW, features.wW, features.zW],
            'dist_cut_off': 5, 'pdb_name': '2jcw', 'chain_letter': 'A', 'reg': 100,
            'mfmi': 100, 'wif': 0, 'wfld': 0, 'nfld': 2, 'ns': 2, 'wob': 0, 'wob2': 2,
            'evalue': 1e-10, 'pos1': 1, 'pos2': 2, 'wreg': 1, 'trun': 99,
            'wclf': objects.pW, 'svmC': 10, 'lgn': 9, 'lgc': 5, 'pos': 45, 'nwc': -1.0,
            'micut': 5, 'wtpr': 0, 'posw': 150, 'sfc': 1, 'self': False}

the_params = param.param(the_dict)