Example #1
def choice_1_COL_RANGE_FIRST(dict_nouns):
  def _reset_params():
    Parameters.PARAMETERS_CHOICE_0_1.CHOICE_1_COL_RANGE_FIRST = 1.01
  def _change_params_for_step(cur_step,alpha):
    _reset_params()
    Parameters.PARAMETERS_CHOICE_0_1.CHOICE_1_COL_RANGE_FIRST += cur_step*alpha

  _reset_params()
  _alpha = 0.02
  _max_step = 10
  _current_step = 0

  _best_result = 0
  _best_params = Parameters.get_current_params()

  while _current_step <= _max_step:
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > _best_result:
      _best_result = accuracy
      _best_params = Parameters.get_current_params()
    _current_step += 1

    _change_params_for_step(_current_step, _alpha)

  _reset_params()
  return (_best_result,_best_params)
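Example #1 and the similar tuning functions that follow all repeat the same sweep loop with different reset and step closures. Below is a minimal sketch of that shared pattern, assuming the same Parameters and SimilarityWordnetOxford interfaces used in these examples; sweep_parameter itself is hypothetical and not part of the original code.
def sweep_parameter(dict_nouns, reset_fn, step_fn, max_step):
  # reset the parameter, then evaluate max_step+1 settings and keep the best one
  reset_fn()
  best_result = 0
  best_params = Parameters.get_current_params()
  for step in range(max_step + 1):
    # evaluate the current parameter setting
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > best_result:
      best_result = accuracy
      best_params = Parameters.get_current_params()
    # move to the next setting before the next evaluation
    step_fn(step + 1)
  reset_fn()
  return (best_result, best_params)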
Example #2
def nbest_similarity(dict_nouns):
  def _reset_params():
    Parameters.PARAMETERS.N_BEST_CALCULATE_SIMILARITY = 1
  def _change_params_for_step(cur_step, alpha):
    _reset_params()
    Parameters.PARAMETERS.N_BEST_CALCULATE_SIMILARITY += cur_step*alpha

  _reset_params()
  _alpha = 1
  _max_step = 9
  _current_step = 0

  _best_result = 0
  _best_params = Parameters.get_current_params()

  while _current_step <= _max_step:
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > _best_result:
      _best_result = accuracy
      _best_params = Parameters.get_current_params()

    _current_step += 1

    _change_params_for_step(_current_step,_alpha)

  _reset_params()

  return (_best_result,_best_params)
Example #3
def jaccard_weight(dict_nouns):
  def _reset_params():
    Parameters.PARAMETERS.JACCARD_WEIGHT = 0.01
  def _change_params_for_step(cur_step, alpha):
    _reset_params()
    Parameters.PARAMETERS.JACCARD_WEIGHT += cur_step*alpha

  _reset_params()
  _alpha = 0.02
  _max_step = 20
  _current_step = 0

  _best_result = 0
  _best_params = Parameters.get_current_params()

  while _current_step <= _max_step:
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > _best_result:
      _best_result = accuracy
      _best_params = Parameters.get_current_params()
    _current_step += 1

    _change_params_for_step(_current_step,_alpha)

  _reset_params()
  return (_best_result,_best_params)
Example #4
def feature_POS(dict_nouns):
  def _reset_params():
    Parameters.PARAMETERS.POS_FEATURE_n = 1
    Parameters.PARAMETERS.POS_FEATURE_v = 0
  def _change_params_for_step(cur_step):
    _reset_params()
    if cur_step == 1:
      Parameters.PARAMETERS.POS_FEATURE_v = 1

  _reset_params()
  _max_step = 1
  _current_step = 0

  _best_result = 0
  _best_params = Parameters.get_current_params()

  while _current_step <= _max_step:
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > _best_result:
      _best_result = accuracy
      _best_params = Parameters.get_current_params()
    _current_step += 1

    _change_params_for_step(_current_step)

  _reset_params()
  return (_best_result,_best_params)
Example #5
def feature_dict(dict_nouns):
  def _reset_params():
    Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_sd = 1
    Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_d = 1
    Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_xh = 1
    Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_x = 1
  def _change_params_for_step(cur_step):
    _reset_params()
    if cur_step == 1:
      Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_xh = 0
    elif cur_step == 2:
      Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_x = 0
    elif cur_step == 3:
      Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_xh = 0
      Parameters.PARAMETERS.DICT_OX_FEATURE_RELATION_x = 0

  _reset_params()
  _max_step = 3
  _current_step = 0

  _best_result = 0
  _best_params = Parameters.get_current_params()

  while _current_step <= _max_step:
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > _best_result:
      _best_result = accuracy
      _best_params = Parameters.get_current_params()
    _current_step += 1

    _change_params_for_step(_current_step)

  _reset_params()
  return (_best_result,_best_params)
Example #6
def feature_wn(dict_nouns):
  def _reset_params():
    Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_definition = 1
    Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 0
    Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hyponyms = 0
    Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_part_meronyms = 0
    Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_member_holonyms = 0
  def _change_params_for_step(cur_step):
    _reset_params()
    if cur_step == 1:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 1
    elif cur_step == 2:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hyponyms = 1
    elif cur_step == 3:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_part_meronyms = 1
    elif cur_step == 4:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_member_holonyms = 1
    elif cur_step == 5:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hyponyms = 1
    elif cur_step == 6:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_part_meronyms = 1
    elif cur_step == 7:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_member_holonyms = 1
    elif cur_step == 8:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hyponyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_part_meronyms = 1
    elif cur_step == 9:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hyponyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_member_holonyms = 1
    elif cur_step == 10:
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hypernyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_hyponyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_part_meronyms = 1
      Parameters.PARAMETERS.DICT_WN_FEATURE_RELATION_member_holonyms = 1


  _reset_params()
  _max_step = 10
  _current_step = 0

  _best_result = 0
  _best_params = Parameters.get_current_params()

  while _current_step <= _max_step:
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > _best_result:
      _best_result = accuracy
      _best_params = Parameters.get_current_params()
    _current_step += 1

    _change_params_for_step(_current_step)

  _reset_params()
  return (_best_result,_best_params)
Example #7
def append_result_to_file(precision,recall,accuracy):

  params_value = Parameters.get_current_params()
  params_value.append(precision)
  params_value.append(recall)
  params_value.append(accuracy)

  append_params_and_result_to_file(params_value)
Example #8
    def search(self):
        #The two main request URLs: CNKI requires two requests, one to submit the search parameters and one to fetch the result page
        url='http://epub.cnki.net/KNS/request/SearchHandler.ashx?action=&NaviCode=*&'
        url2='http://epub.cnki.net/kns/brief/brief.aspx?'
        
        #Create the cookie jar
        cookie = cookielib.CookieJar()

        #Build a new opener that uses the cookie jar
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie),urllib2.HTTPHandler)
        
        #Build the request headers to mimic a browser
        #httplib.HTTPConnection.debuglevel = 1
        hosturl='http://epub.cnki.net/kns/brief/result.aspx?dbprefix=scdb&action=scdbsearch&db_opt=SCDB'
        headers={'Connection':'Keep-Alive',
                 'Accept':'text/html,*/*',
                 'User-Agent':'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.66 Safari/537.36',
                 'Referer':hosturl}
        
        #The submission parameters were captured with Chrome devtools; CNKI expects UTF-8 rather than GBK encoding,
        #so Chinese text must first be decoded from GBK and then encoded as UTF-8
        #before the parameters are URL-encoded; the encoding order does not affect the submission result
        parameters=Parameters.parameters()
        postdata=urllib.urlencode(parameters)
        
        #Build the parameters for the second request; they seem to have no effect on the response - modifying keyValue and spvalue still returns results normally
        query_string=urllib.urlencode({'pagename':'ASP.brief_result_aspx','DbCatalog':'中国学术文献网络出版总库',
                                       'ConfigFile':'SCDB.xml','research':'off','t':int(time.time()),
                                       'keyValue':'','dbPrefix':'SCDB',
                                       'S':'1','spfield':'SU','spvalue':'',
                                       })
        
        #Step 1: submit the search request
        req=urllib2.Request(url+postdata,headers=headers)
        html=opener.open(req).read()
        with open('web1.html','w') as e:
            e.write(html)

        #Step 2: submit the second request; its response is the search result page
        req2=urllib2.Request(url2+query_string,headers=headers)
        result2 = opener.open(req2)
        html2=result2.read()
        #Print the cookie values; downloading articles would additionally require login handling
        for item in cookie:
            print 'Cookie:%s:\n%s\n'%(item.name,item.value)
        with open('web2.html','w') as e:
            e.write(html2)
        
        print self.Regular(html)

    def Regular(self,html):
        reg='<a href="(.*?)"\ttarget'
        comlists=re.findall(re.compile(reg),html)
        return comlists
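The comments above describe a two-step flow: the first request submits the search parameters and the second fetches the result page, with cookies shared between them. A rough Python 3 sketch of the same flow using the requests library; the endpoints come from Example #8, everything else (parameter contents, headers) is illustrative only.
import time
import requests

def cnki_search(parameters):
    url = 'http://epub.cnki.net/KNS/request/SearchHandler.ashx'
    url2 = 'http://epub.cnki.net/kns/brief/brief.aspx'
    headers = {'User-Agent': 'Mozilla/5.0',
               'Referer': 'http://epub.cnki.net/kns/brief/result.aspx?dbprefix=scdb'}
    session = requests.Session()                 # keeps cookies between the two requests
    # step 1: submit the search parameters
    session.get(url, params=parameters, headers=headers)
    # step 2: fetch the result page produced by step 1
    result = session.get(url2, params={'pagename': 'ASP.brief_result_aspx', 't': int(time.time())},
                         headers=headers)
    return result.text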
Example #9
def train_from_base(dict_nouns):
  # sweep one feature at a time over its range and keep the best result
  lower = -1
  previous_random = -1
  best_result_loop = 0
  best_params_loop = Parameters.get_current_params()
  while lower < 7:
    chosen_feature = random.randint(0,6)
    while chosen_feature == previous_random:
      chosen_feature = random.randint(0,6)
    previous_random = chosen_feature
    best_result = 0
    best_params = []
    if lower >= 0:
      chosen_feature = lower

    if chosen_feature == 0:
      (best_result, best_params) = jaccard_weight(dict_nouns)
    elif chosen_feature == 1:
      (best_result, best_params) = choice_1_COL_RANGE_FIRST(dict_nouns)
    elif chosen_feature == 2:
      (best_result, best_params) = choice_N_N_RANGE_FIRST(dict_nouns)
    elif chosen_feature == 3:
      (best_result, best_params) = feature_wn(dict_nouns)
    elif chosen_feature == 4:
      (best_result, best_params) = feature_dict(dict_nouns)
    elif chosen_feature == 5:
      (best_result, best_params) = feature_POS(dict_nouns)
    else:
      (best_result, best_params) = nbest_similarity(dict_nouns)
    # compare with _best
    if best_result >= best_result_loop:
      best_result_loop = best_result
      best_params_loop = best_params
      lower = -1
    else:
      lower += 1

    Parameters.set_params_from_arr(best_params_loop)

  return (best_result_loop, best_params_loop)
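train_from_base is a randomized coordinate ascent: it keeps re-tuning one randomly chosen feature at a time and only stops once every feature has been retried without improving the best accuracy. A simplified sketch of that idea, where tuners stands in for the per-feature tuning functions defined above; coordinate_ascent itself is hypothetical.
import random

def coordinate_ascent(dict_nouns, tuners):
  best_result = 0
  best_params = Parameters.get_current_params()
  failures = 0
  while failures < len(tuners):
    # tune one randomly chosen feature while the other parameters stay at their best values
    result, params = random.choice(tuners)(dict_nouns)
    if result >= best_result:
      best_result, best_params = result, params
      failures = 0          # an improvement resets the stopping counter
    else:
      failures += 1
    Parameters.set_params_from_arr(best_params)
  return (best_result, best_params)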
Example #10
def train_with_random(dict_nouns):
  _best_result = 0
  _best_params = Parameters.get_current_params()

  for i in range(0,1000):
    # randomise all feature parameters
    Parameters.random_params_values()
    # evaluate the result -> keep the current best
    (precision, recall, accuracy) = SimilarityWordnetOxford.similarityWords(dict_nouns)
    if accuracy > _best_result:
      _best_result = accuracy
      _best_params = Parameters.get_current_params()

    (best_result_loop, best_params_loop) = train_from_base(dict_nouns)

    if best_result_loop>= _best_result:
        _best_result = best_result_loop
        _best_params = best_params_loop
        Parameters.set_params_from_arr(_best_params)
        WriteParametersAndResult.append_params_and_result_to_file(_best_params)
Example #11
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal.windows import general_hamming
import Parameters
import os
from datetime import datetime

param = Parameters.Parameters()
# DEMONSTRATION OF NADIR ECHO SUPPRESSION WITH TWO POINT TARGETS AND UP- AND DOWN-CHIRPS

# Constants

c0 = param.c  # speed of light in vacuum [m/s]
R_E = param.R_EARTH  # Earth's mean radius [m]

# System parameters

lamb = param.wavelength  # wavelength [m]
h = param.height  # orbit height [m]
CR = param.cr  # chirp compression ratio
B = param.chirp_bandwidth  # chirp bandwidth [Hz]
tau = param.chirp_duration  # uncompressed pulse length [s]
f_s = param.rg_f_s  # range sampling frequency [Hz]
PRF = param.PRF  # pulse repetition frequency [Hz]
L = param.antenna_length  # antenna length [m]
R0 = param.R0  # target distance [m]

# Processing parameters

hamming_rg = param.hamming_rg  # Hamming window in range (0 -> no, 1 -> yes)
hamming_rg_alpha = param.hamming_rg_alpha  # alpha coefficient for the Hamming window in range
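For context, the general_hamming import above is used for the optional range weighting controlled by hamming_rg and hamming_rg_alpha. A purely illustrative snippet of how such a window is typically applied to a range spectrum; N_rg and the random test line are placeholders, not part of the original script.
if hamming_rg:
    N_rg = 2048                                      # hypothetical number of range samples
    win = general_hamming(N_rg, hamming_rg_alpha)    # alpha = 0.54 gives the classic Hamming window
    range_line = np.fft.fft(np.random.randn(N_rg))   # stand-in for one range line in the frequency domain
    range_line_weighted = range_line * win           # sidelobe suppression at the cost of resolution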
Example #12
import Parameters, Bee

print("teste2")
p = Parameters.Params("par")
#print(p.dim, p.SN, p.MCN, p.limit, p.MCN, p.MCN, p.scoutnum, p.onlnum, p.lowBound, p.uppBound, p.funcName, p.size)
b = Bee.Bee(p)
print(b.bias)
print(b.weights)
#build the network bottom-up
#example of a 3-2-1 network
l3 = Bee.Layer(1)
l2 = Bee.Layer(2, None, l3)
l = Bee.Layer(3, None, l2)
l2.prev = l

print("l3:", l3.weights, l3.bias)
print("l2:", l2.weights, l2.bias)
print("l1:", l.weights, l.bias)
Example #13
def ClassifyERPs(
    featurefiles,
    C=(10.0, 1.0, 0.1, 0.01),
    gamma=(1.0, 0.8, 0.6, 0.4, 0.2, 0.0),
    keepchan=(),
    rmchan=(),
    rmchan_usualsuspects=('AUDL', 'AUDR', 'LAUD', 'RAUD', 'SYNC', 'VSYNC',
                          'VMRK', 'OLDREF'),
    rebias=True,
    save=False,
    select=False,
    description='ERPs to attended vs unattended events',
    maxcount=None,
    classes=None,
    folds=None,
    time_window=None,
    keeptrials=None,
):

    file_inventory = []
    d = DataFiles.load(featurefiles,
                       catdim=0,
                       maxcount=maxcount,
                       return_details=file_inventory)
    if isinstance(folds, basestring) and folds.lower() in [
            'lofo', 'loro', 'leave on run out', 'leave one file out'
    ]:
        n, folds = 0, []
        for each in file_inventory:
            neach = each[1]['x']
            folds.append(range(n, n + neach))
            n += neach

    if 'x' not in d:
        raise ValueError(
            "found no trial data - no 'x' variable - in the specified files")
    if 'y' not in d:
        raise ValueError(
            "found no trial labels - no 'y' variable - in the specified files")

    x = d['x']
    y = numpy.array(d['y'].flat)
    if keeptrials != None:
        x = x[numpy.asarray(keeptrials), :, :]
        y = y[numpy.asarray(keeptrials)]

    if time_window != None:
        fs = d['fs']
        t = SigTools.samples2msec(numpy.arange(x.shape[2]), fs)
        x[:, :, t < min(time_window)] = 0
        x[:, :, t > max(time_window)] = 0

    if classes != None:
        for cl in classes:
            if cl not in y:
                raise ValueError("class %s is not in the dataset" % str(cl))
        mask = numpy.array([yi in classes for yi in y])
        y = y[mask]
        x = x[mask]
        discarded = sum(mask == False)
        if discarded:
            print "discarding %d trials that are outside the requested classes %s" % (
                discarded, str(classes))

    n = len(y)
    uy = numpy.unique(y)
    if uy.size != 2:
        raise ValueError("expected 2 classes in dataset, found %d : %s" %
                         (uy.size, str(uy)))
    for uyi in uy:
        nyi = sum([yi == uyi for yi in y])
        nyi_min = 2
        if nyi < nyi_min:
            raise ValueError(
                'only %d exemplars of class %s - need at least %d' %
                (nyi, str(uyi), nyi_min))

    y = numpy.sign(y - uy.mean())

    cov, trchvar = SigTools.spcov(
        x=x, y=y, balance=False,
        return_trchvar=True)  # NB: symwhitenkern would not be able to balance

    starttime = time.time()

    chlower = [ch.lower() for ch in d['channels']]
    if keepchan in [None, (), '', []]:
        if isinstance(rmchan, basestring): rmchan = rmchan.split()
        if isinstance(rmchan_usualsuspects, basestring):
            rmchan_usualsuspects = rmchan_usualsuspects.split()
        allrmchan = [
            ch.lower() for ch in list(rmchan) + list(rmchan_usualsuspects)
        ]
        unwanted = numpy.array([ch in allrmchan for ch in chlower])
        notfound = [ch for ch in rmchan if ch.lower() not in chlower]
    else:
        if isinstance(keepchan, basestring): keepchan = keepchan.split()
        lowerkeepchan = [ch.lower() for ch in keepchan]
        unwanted = numpy.array([ch not in lowerkeepchan for ch in chlower])
        notfound = [ch for ch in keepchan if ch.lower() not in chlower]

    wanted = numpy.logical_not(unwanted)
    print ' '
    if len(notfound):
        print "WARNING: could not find channel%s %s\n" % ({
            1: ''
        }.get(len(notfound), 's'), ', '.join(notfound))
    removed = [ch for removing, ch in zip(unwanted, d['channels']) if removing]
    if len(removed):
        print "removed %d channel%s (%s)" % (len(removed), {
            1: ''
        }.get(len(removed), 's'), ', '.join(removed))
    print "classification will be based on %d channel%s" % (sum(wanted), {
        1: ''
    }.get(sum(wanted), 's'))
    print "%d negatives + %d positives = %d exemplars" % (sum(y < 0),
                                                          sum(y > 0), n)
    print ' '

    x[:, unwanted, :] = 0
    cov[:, unwanted] = 0
    cov[unwanted, :] = 0
    nu = numpy.asarray(cov).diagonal()[wanted].mean()
    for i in range(len(cov)):
        if cov[i, i] == 0: cov[i, i] = nu

    if not isinstance(C, (tuple, list, numpy.ndarray, type(None))): C = [C]
    if not isinstance(gamma, (tuple, list, numpy.ndarray, type(None))):
        gamma = [gamma]

    c = SigTools.klr2class(lossfunc=SigTools.balanced_loss, relcost='balance')
    c.varyhyper({})
    if c != None: c.hyper.C = list(C)
    if gamma == None: c.hyper.kernel.func = SigTools.linkern
    else:
        c.varyhyper({
            'kernel.func': SigTools.symwhitenkern,
            'kernel.cov': [cov],
            'kernel.gamma': list(gamma)
        })
    c.cvtrain(x=x, y=y, folds=folds)
    if rebias: c.rebias()
    c.calibrate()

    chosen = c.cv.chosen.hyper
    if gamma == None:
        Ps = None
        Gp = c.featureweight(x=x)
    else:
        Ps = SigTools.svd(
            SigTools.shrinkcov(cov, copy=True,
                               gamma=chosen.kernel.gamma)).isqrtm
        xp = SigTools.spfilt(x, Ps.H, copy=True)
        Gp = c.featureweight(x=xp)

    u = SigTools.stfac(Gp, Ps)
    u.channels = d['channels']
    u.channels_used = wanted
    u.fs = d['fs']
    u.trchvar = trchvar
    try:
        u.channels = SigTools.ChannelSet(u.channels)
    except:
        print 'WARNING: failed to convert channels to ChannelSet'

    elapsed = time.time() - starttime
    minutes = int(elapsed / 60.0)
    seconds = int(round(elapsed - minutes * 60.0))
    print '%d min %d sec' % (minutes, seconds)
    datestamp = time.strftime('%Y-%m-%d %H:%M:%S')
    csummary = '%s (%s) trained on %d (CV %s = %.3f) at %s' % (
        c.__class__.__name__,
        SigTools.experiment()._shortdesc(chosen),
        sum(c.input.istrain),
        c.loss.func.__name__,
        c.loss.train,
        datestamp,
    )
    description = 'binary classification of %s: %s' % (description, csummary)
    u.description = description

    if save or select:
        if not isinstance(save, basestring):
            save = featurefiles
            if isinstance(save, (tuple, list)): save = save[-1]
            if save.lower().endswith('.gz'): save = save[:-3]
            if save.lower().endswith('.pk'): save = save[:-3]
            save = save + '_weights.prm'
        print "\nsaving %s\n" % save
        Parameters.Param(u.G.A,
                         Name='ERPClassifierWeights',
                         Section='PythonSig',
                         Subsection='Epoch',
                         Comment=csummary).write_to(save)
        Parameters.Param(c.model.bias,
                         Name='ERPClassifierBias',
                         Section='PythonSig',
                         Subsection='Epoch',
                         Comment=csummary).append_to(save)
        Parameters.Param(description,
                         Name='SignalProcessingDescription',
                         Section='PythonSig').append_to(save)
        if select:
            if not isinstance(select, basestring): select = 'ChosenWeights.prm'
            if not os.path.isabs(select):
                select = os.path.join(os.path.split(save)[0], select)
            print "saving %s\n" % select
            import shutil
            shutil.copyfile(save, select)

    print description
    return u, c
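A hypothetical call illustrating the options above; the feature file name, hyperparameter grid and time window are placeholders, not taken from the original source.
u, c = ClassifyERPs(['TrainingData.pk'], C=(10.0, 1.0), gamma=None,
                    folds='lofo', time_window=(200, 600), save=True)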
Example #14
import os
import sys
import time
import argparse

sys.path.insert(0, _root_dir + os.sep + "CommonUtils")
import Parameters
import Mongo_Connector

def gene_to_lower():
    '''Add a lower-cased copy of each gene name (namelc) to the ensgenes collection.'''
    starttime = time.time()
    cursor = mongodb.find('ensgenes', {}, {"name":1, "_id":0})
    for gene in cursor:
        # print "gene.name = %s" % (gene['name'])
        mongodb.update('ensgenes', {"name":gene['name']}, {'$set': {'namelc':gene['name'].lower()}})
    print 'Done in %i seconds' % (time.time() - starttime)




if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # parser.add_argument("csvbeta", help = "The file name of the CSV beta file to import", type = str)
    parser.add_argument("-dbconfig", help = "An optional file to specify the database location - default is database.conf in MongoDB directory", type = str, default = None)
    parser.add_argument("-dbname", help = "name of the Database in the Mongo implementation to use - default is provided in the database.conf file specified", type = str, default = None)
    args = parser.parse_args()
    p = Parameters.parameter(args.dbconfig)
    if args.dbname:
        p.set("default_database", args.dbname)
    mongodb = Mongo_Connector.MongoConnector(p.get('server'), p.get('port'), p.get('default_database'))
    gene_to_lower()
    mongodb.close()


Example #15
import random
import string

import Logger
import Parameters
import downloadTweet

#Create Reference Code
code = ''.join(
    random.choices(string.ascii_uppercase + string.digits +
                   string.ascii_lowercase,
                   k=16))

#Create the Logger
logD = Logger.logs(code)

logD.info('Reference Code: ' + code)

#Get Parameters from the Text File
queryParam = Parameters.getQuery(logD)
queryParam.OAuth()
parameters = queryParam.getParam()

#Variables definition
companies = parameters[0]
toDate = parameters[1]
fromDate = parameters[2]
consumer_key = parameters[3]
consumer_secret = parameters[4]
devEnv = parameters[5]

#API Connection & Authentication
cnn = downloadTweet.tweets(consumer_key, consumer_secret, logD, devEnv)
cnn.auth()
Example #16
    def __init__(self):

        super(QWidget, self).__init__()

        # window interface setup
        self.setFont(QFont('Century Gothic', 10))
        self.setWindowIcon(QIcon('logo.png'))
        self.setWindowTitle("Одноразовый договор")
        window_w1 = Parameters.ParameterSize().ww()
        window_h1 = Parameters.ParameterSize().wh()
        button_color = Parameters.Color().whatcolor()
        self.setFixedSize(window_w1 * 0.5, window_h1 * 0.5)

        hbox = QHBoxLayout()
        hbox2 = QHBoxLayout()
        hbox3 = QHBoxLayout()
        hbox4 = QHBoxLayout()
        vbox = QVBoxLayout()

        # input fields for the company details
        self.company_line = QLineEdit()
        self.company_line.setPlaceholderText('Представитель')
        self.dir_dol_line = QLineEdit()
        self.dir_dol_line.setPlaceholderText('Должность Представителя')
        self.dir_fio_line = QLineEdit()
        self.dir_fio_line.setPlaceholderText('ФИО Представителя')

        self.adr_line = QLineEdit()
        self.adr_line.setPlaceholderText('Адрес')
        self.tel_line = QLineEdit()
        self.tel_line.setPlaceholderText('Телефон')
        self.inn_line = QLineEdit()
        self.inn_line.setPlaceholderText('ИНН')
        self.email_line = QLineEdit()
        self.email_line.setPlaceholderText('@mail')

        self.r_s_line = QLineEdit()
        self.r_s_line.setPlaceholderText('р/cчёт')
        self.k_s_line = QLineEdit()
        self.k_s_line.setPlaceholderText('к/cчёт')
        self.kpp_line = QLineEdit()
        self.kpp_line.setPlaceholderText('КПП')

        self.bik_line = QLineEdit()
        self.bik_line.setPlaceholderText('БИК')
        self.bank_line = QLineEdit()
        self.bank_line.setPlaceholderText('Банк')

        # document creation button
        but = QPushButton("Создать документ", self)
        but.setStyleSheet("background-color: {0}".format(button_color))
        but.clicked.connect(self.ccreate)

        # layout of the widgets in the window
        hbox.addWidget(self.company_line)
        hbox.addWidget(self.dir_dol_line)
        hbox.addWidget(self.dir_fio_line)

        hbox2.addWidget(self.adr_line)
        hbox2.addWidget(self.tel_line)
        hbox2.addWidget(self.inn_line)

        hbox3.addWidget(self.r_s_line)
        hbox3.addWidget(self.k_s_line)
        hbox3.addWidget(self.kpp_line)

        hbox4.addWidget(self.bik_line)
        hbox4.addWidget(self.bank_line)
        hbox4.addWidget(self.email_line)

        vbox.addLayout(hbox)
        vbox.addLayout(hbox2)
        vbox.addLayout(hbox3)
        vbox.addLayout(hbox4)
        vbox.addWidget(but, alignment=QtCore.Qt.AlignHCenter)

        self.setLayout(vbox)
Example #17
import importlib
import sys

import pandas as pd
import tensorflow as tf

import Definitions
import Parameters
from Graphs import run_episode
"""
Example file for postprocessing 
"""

Hooks = importlib.import_module(Parameters.MODEL_NAME + ".Hooks")

global POST_PROCESSING
POST_PROCESSING = True

tf.get_logger().setLevel('CRITICAL')

pd.set_option('display.max_columns', None)
starting_policy = Parameters.policy(Parameters.starting_state)

Equations = importlib.import_module(Parameters.MODEL_NAME + ".Equations")

#for i, s in enumerate(Parameters.states):
#for ps in Parameters.policy_states:
#plt.plot(getattr(State,s)(Parameters.starting_state).numpy(), getattr(PolicyState,ps)(starting_policy).numpy(), 'bs')
## add policy lines at min / max / median
#state_where_policy_is_min = tf.math.argmin(getattr(PolicyState,ps)(starting_policy))
#state_where_policy_is_max = tf.math.argmax(getattr(PolicyState,ps)(starting_policy))

#test_states = tf.sort(getattr(State,s)(Parameters.starting_state) * 4 - 3*tf.math.reduce_mean(getattr(State,s)(Parameters.starting_state)))

#states_lower = tf.tile(tf.expand_dims(Parameters.starting_state[state_where_policy_is_min,:],axis=0),[starting_policy.shape[0],1])
#states_lower = tf.tensor_scatter_nd_update(states_lower,[[j,i] for j in range(Parameters.starting_state.shape[0])], test_states)
Example #18
from DMRGalgorithms import dmrg_finite_size
import Parameters as Pm
import numpy as np
from os import path
from BasicFunctions import load_pr, save_pr, plot, output_txt
from TensorBasicModule import open_mps_product_state_spin_up

length = list(range(3, 30))

lattice = 'chain'
para_dmrg = Pm.generate_parameters_dmrg(lattice)
para_dmrg['spin'] = 'half'
para_dmrg['bound_cond'] = 'periodic'
para_dmrg['chi'] = 128
para_dmrg['l'] = 12
para_dmrg['jxy'] = 1
para_dmrg['jz'] = 1
para_dmrg['hx'] = 0
para_dmrg['hz'] = 0
para_dmrg['project_path'] = '.'
para_dmrg['data_path'] = 'data/HeisenbergChain'

for n in range(len(length)):
    para_dmrg['l'] = length[n]
    para_dmrg = Pm.make_consistent_parameter_dmrg(para_dmrg)
    ob, a, info, para = dmrg_finite_size(para_dmrg)
    print('Energy per site = ' + str(ob['e_per_site']))
    save_pr(para['data_path'], para['data_exp'] + '.pr', (ob, a, info, para),
            ('ob', 'a', 'info', 'para'))

# if path.isfile(path.join(para_dmrg['data_path'], para_dmrg['data_exp'] + '.pr')):
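A variant of the loop above that also collects the energy per site for each chain length and writes a two-column summary; illustrative only, the output file name is a placeholder.
energies = []
for n in range(len(length)):
    para_dmrg['l'] = length[n]
    para_dmrg = Pm.make_consistent_parameter_dmrg(para_dmrg)
    ob, a, info, para = dmrg_finite_size(para_dmrg)
    energies.append(ob['e_per_site'])        # ground-state energy per site for this length
np.savetxt('e_per_site_vs_length.txt', np.column_stack((length, energies)))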
Example #19
        self.pslg = FemIo.loadEle(self.eleFilename)
        self.parameters.initialize(self.pslg)

        #Load results
        self.results = FemIo.loadFem(self.femFilename)

        #Iterate over parameters
        spatialSums = []
        for result in self.results:
            currentSum = 0.0
            for segment in self.parameters.omegaD:
                rhoStart = result[1][segment[0].startpoint.index]
                rhoEnd = result[1][segment[0].endpoint.index]
                currentSum += segment[0].getLength() * (rhoStart +
                                                        rhoEnd) / 2.0
            spatialSums.append((result[0], currentSum))

        vesicleFile = open(self.vesiclesFilename, "w")
        for i in range(0, len(spatialSums) - 1):
            alphaValue = self.parameters.releaseEfficiency(spatialSums[i][0])
            value = alphaValue * self.parameters.deltaT * (
                spatialSums[i][1] + spatialSums[i + 1][1]) / 2.0
            vesicleFile.write(
                str(spatialSums[i][0]) + "\t" + str(value) + "\n")
        vesicleFile.close()


if __name__ == '__main__':
    parameters = Parameters.Parameters()
    releaseCalculator = ReleaseCalculator(parameters)
    releaseCalculator.run()
Example #20
def run_experiment(args):
    parameters = Parameters.processArguments(args, __doc__)

    #If nnFile is a directory, check for a previous experiment run in it and resume from there:
    #load its parameters, append to its results file, and open its highest-epoch network file.
    #If nnFile is None, create an experiment directory and save the results file, parameters, and network files there.

    experimentDirectory = parameters.rom + "_" + time.strftime("%d-%m-%Y-%H-%M") +"/"
    resultsFileName = experimentDirectory + "results.csv"
    startingEpoch = 1
    if parameters.nnFile is None or parameters.nnFile.endswith(".pkl"):
        #Create your experiment directory, results file, save parameters
        if not os.path.isdir(experimentDirectory):
            os.mkdir(experimentDirectory)

        resultsFile = open(resultsFileName, "a")
        resultsFile.write("Epoch,\tAverageReward,\tMean Q Value\n")
        resultsFile.close()

        parametersFile = open(experimentDirectory + "parameters.pkl" , 'wb', -1)
        cPickle.dump(parameters,parametersFile)
        parametersFile.close()


    if parameters.nnFile is not None and os.path.isdir(parameters.nnFile):
        #Found an experiment directory
        if not parameters.nnFile.endswith("/"):
            parameters.nnFile += "/"

        experimentDirectory = parameters.nnFile
        resultsFileName = experimentDirectory + "results.csv"

        if os.path.exists(experimentDirectory + "parameters.pkl"):
            parametersFile = open(experimentDirectory + "parameters.pkl" , 'rb')
            parameters = cPickle.load(parametersFile)
            parametersFile.close()
        else:
            parametersFile = open(experimentDirectory + "parameters.pkl" , 'wb', -1)
            cPickle.dump(parameters,parametersFile)
            parametersFile.close()

        contents = os.listdir(experimentDirectory)
        networkFiles = []
        for handle in contents:
            if handle.startswith("network") and handle.endswith(".pkl"):
                networkFiles.append(handle)

        if len(networkFiles) == 0:
            #Found a premature experiment that didn't finish a single training epoch
            parameters.nnFile = None
        else:
            #Found a previous experiment's network files; now find the highest epoch number
            highestNNFile = networkFiles[0]
            highestNetworkEpochNumber = int(highestNNFile[highestNNFile.index("_") + 1 : highestNNFile.index(".")])
            for networkFile in networkFiles:
                networkEpochNumber =  int(networkFile[networkFile.index("_") + 1 : networkFile.index(".")])
                if networkEpochNumber > highestNetworkEpochNumber:
                    highestNNFile = networkFile
                    highestNetworkEpochNumber = networkEpochNumber

            startingEpoch = highestNetworkEpochNumber + 1
            #Don't use full exploration; it's not a good way to fill the replay memory when we already have a decent policy
            if startingEpoch > 1:
                parameters.epsilonStart = parameters.epsilonEnd

            parameters.nnFile = experimentDirectory + highestNNFile
            print "Loaded experiment: " + experimentDirectory + "\nLoaded network file:" + highestNNFile


    sys.setrecursionlimit(10000)
    ale = ALEInterface()

    Environment.initializeALEParameters(ale, parameters.seed, parameters.frameSkip, parameters.repeatActionProbability, parameters.displayScreen)
    ale.loadROM(parameters.fullRomPath)
    minimalActions = ale.getMinimalActionSet()


    agent = DQNAgent.DQNAgent(minimalActions, parameters.croppedHeight, parameters.croppedWidth, 
                parameters.batchSize, 
                parameters.phiLength,
                parameters.nnFile, 
                parameters.loadWeightsFlipped, 
                parameters.updateFrequency, 
                parameters.replayMemorySize, 
                parameters.replayStartSize,
                parameters.networkType, 
                parameters.updateRule, 
                parameters.batchAccumulator, 
                parameters.networkUpdateDelay,
                parameters.discountRate, 
                parameters.learningRate, 
                parameters.rmsRho, 
                parameters.rmsEpsilon, 
                parameters.momentum,
                parameters.epsilonStart, 
                parameters.epsilonEnd, 
                parameters.epsilonDecaySteps,
                parameters.evalEpsilon,
                parameters.useSARSAUpdate,
                parameters.kReturnLength)



    for epoch in xrange(startingEpoch, parameters.epochs + 1):
        agent.startTrainingEpoch(epoch)
        runTrainingEpoch(ale, agent, epoch, parameters.stepsPerEpoch)
        agent.endTrainingEpoch(epoch)

        networkFileName = experimentDirectory + "network_" + str(epoch) + ".pkl"
        DeepNetworks.saveNetworkParams(agent.network.qValueNetwork, networkFileName)

        if parameters.stepsPerTest > 0 and epoch % parameters.evaluationFrequency == 0:
            agent.startEvaluationEpoch(epoch)
            avgReward = runEvaluationEpoch(ale, agent, epoch, parameters.stepsPerTest)
            holdoutQVals = agent.computeHoldoutQValues(3200)

            resultsFile = open(resultsFileName, 'a')
            resultsFile.write(str(epoch) + ",\t" + str(round(avgReward, 4)) + ",\t\t" + str(round(holdoutQVals, 4)) + "\n")
            resultsFile.close()

            agent.endEvaluationEpoch(epoch)

    agent.agentCleanup()
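The scan over networkFiles above can also be written with max(); a sketch assuming the same network_<epoch>.pkl naming convention, where latest_network_file is hypothetical.
import os

def latest_network_file(directory):
    candidates = [f for f in os.listdir(directory)
                  if f.startswith("network") and f.endswith(".pkl")]
    if not candidates:
        return None, 0                       # premature experiment: no finished epoch yet
    epoch_of = lambda name: int(name[name.index("_") + 1 : name.index(".")])
    newest = max(candidates, key=epoch_of)
    return newest, epoch_of(newest)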
Example #21
def getScheduleFromJsonObject(parameters):
    params = Utilities.WrappedDictionary("missing parameter", parameters)
    Parameters.validateAndMassageParameters(params)
    logging.debug(params)
    (stri, sched) = dr.makeSchedule(params)
    return (stri, sched)
Example #22
def run_experiment(args):
    parameters = Parameters.processArguments(args, __doc__)

    #If nnFile is a directory, check for a previous experiment run in it and resume from there:
    #load its parameters, append to its results file, and open its highest-epoch network file.
    #If nnFile is None, create an experiment directory and save the results file, parameters, and network files there.

    experimentDirectory = parameters.rom + "_" + time.strftime("%d-%m-%Y-%H-%M") +"/"
    resultsFileName = experimentDirectory + "results.csv"
    startingEpoch = 0
    if parameters.nnFile is None or parameters.nnFile.endswith(".pkl"):
        #Create your experiment directory, results file, save parameters
        if not os.path.isdir(experimentDirectory):
            os.mkdir(experimentDirectory)

        resultsFile = open(resultsFileName, "a")
        resultsFile.write("Epoch,\tAverageReward,\tMean Q Value\n")
        resultsFile.close()

        parametersFile = open(experimentDirectory + "parameters.pkl" , 'wb', -1)
        cPickle.dump(parameters,parametersFile)
        parametersFile.close()


    if parameters.nnFile is not None and os.path.isdir(parameters.nnFile):
        #Found an experiment directory
        if not parameters.nnFile.endswith("/"):
            parameters.nnFile += "/"

        experimentDirectory = parameters.nnFile
        resultsFileName = experimentDirectory + "results.csv"

        if os.path.exists(experimentDirectory + "parameters.pkl"):
            parametersFile = open(experimentDirectory + "parameters.pkl" , 'rb')
            parameters = cPickle.load(parametersFile)
            parametersFile.close()
        else:
            parametersFile = open(experimentDirectory + "parameters.pkl" , 'wb', -1)
            cPickle.dump(parameters,parametersFile)
            parametersFile.close()

        contents = os.listdir(experimentDirectory)
        networkFiles = []
        for handle in contents:
            if handle.startswith("network") and handle.endswith(".pkl"):
                networkFiles.append(handle)

        if len(networkFiles) == 0:
            #Found a premature experiment that didn't finish a single training epoch
            parameters.nnFile = None
        else:
            #Found a previous experiment's network files; now find the highest epoch number
            highestNNFile = networkFiles[0]
            highestNetworkEpochNumber = int(highestNNFile[highestNNFile.index("_") + 1 : highestNNFile.index(".")])
            for networkFile in networkFiles:
                networkEpochNumber =  int(networkFile[networkFile.index("_") + 1 : networkFile.index(".")])
                if networkEpochNumber > highestNetworkEpochNumber:
                    highestNNFile = networkFile
                    highestNetworkEpochNumber = networkEpochNumber

            startingEpoch = highestNetworkEpochNumber + 1
            #Don't use full exploration; it's not a good way to fill the replay memory when we already have a decent policy
            if startingEpoch > 4:
                parameters.epsilonStart = parameters.epsilonEnd

            parameters.nnFile = experimentDirectory + highestNNFile
            print "Loaded experiment: " + experimentDirectory + "\nLoaded network file:" + highestNNFile

    
    sys.setrecursionlimit(10000)
    ale = ALEInterface()

    Environment.initializeALEParameters(ale, parameters.seed, parameters.frameSkip, parameters.repeatActionProbability, parameters.displayScreen)

    # ale.loadROM(parameters.fullRomPath)

    # minimalActions = ale.getMinimalActionSet()

    # difficulties = ale.getAvailableDifficulties()
    # modes = ale.getAvailableModes()

    # maxNumFlavors = len(difficulties) * len(modes)

    # difficulties = createFlavorList(parameters.difficultyString, len(difficulties))
    # modes = createFlavorList(parameters.modeString, len(modes))

    # transferTaskModule = TransferTaskModule.TransferTaskModule(difficulties, modes)


    transferTaskModule = TransferTaskModule.TransferTaskModule(ale, parameters.roms, parameters.difficultyString, parameters.modeString, parameters.taskBatchFlag)
    numActionsToUse = transferTaskModule.getNumTotalActions()
    print "Number of total tasks:" + str(transferTaskModule.getNumTasks()) + " across " + str(transferTaskModule.getNumGames()) + " games."
    print "Actions List:" + str(transferTaskModule.getTotalActionsList())
    # print "Num difficulties: " + str(len(difficulties)) + " num modes: " + str(len(modes)) + " numtasks: " + str(transferTaskModule.getNumTasks())
    # print "Modes: " + str(modes)
    # print "Difficulties: " + str(difficulties)

    numTransferTasks = transferTaskModule.getNumTasks()

    if (parameters.reduceEpochLengthByNumFlavors):
        parameters.stepsPerEpoch = int(parameters.stepsPerEpoch / numTransferTasks)

    agent = DQTNAgent.DQTNAgent(transferTaskModule.getTotalActionsList(), parameters.croppedHeight, parameters.croppedWidth, 
                parameters.batchSize, 
                parameters.phiLength,
                parameters.nnFile, 
                parameters.loadWeightsFlipped, 
                parameters.updateFrequency, 
                parameters.replayMemorySize, 
                parameters.replayStartSize,
                parameters.networkType, 
                parameters.updateRule, 
                parameters.batchAccumulator, 
                parameters.networkUpdateDelay,
                transferTaskModule,
                parameters.transferExperimentType,
                numTransferTasks,
                parameters.discountRate, 
                parameters.learningRate, 
                parameters.rmsRho, 
                parameters.rmsEpsilon, 
                parameters.momentum,
                parameters.epsilonStart, 
                parameters.epsilonEnd, 
                parameters.epsilonDecaySteps,
                parameters.evalEpsilon,
                parameters.useSARSAUpdate,
                parameters.kReturnLength,
                parameters.deathEndsEpisode)



    for epoch in xrange(startingEpoch, parameters.epochs + 1):
        agent.startTrainingEpoch(epoch)
        runTrainingEpoch(ale, agent, epoch, parameters.stepsPerEpoch, transferTaskModule, parameters.frameSkip, parameters.maxNoActions)
        agent.endTrainingEpoch(epoch)

        networkFileName = experimentDirectory + "network_" + str(epoch) + ".pkl"
        DeepNetworks.saveNetworkParams(agent.network.qValueNetwork, networkFileName)

        print "Total number of samples seen per task: "
        print str(agent.trainingMemory.totalTaskSampleCount)

        if parameters.stepsPerTest > 0 and epoch % parameters.evaluationFrequency == 0:
            agent.startEvaluationEpoch(epoch)
            avgRewardPerTask = runEvaluationEpoch(ale, agent, epoch, parameters.stepsPerTest, transferTaskModule, parameters.frameSkip, parameters.maxNoActions)
            holdoutQVals = agent.computeHoldoutQValues(parameters.numHoldoutQValues)

            resultsFile = open(resultsFileName, 'a')
            resultsFile.write(str(epoch) + ",\t")
            resultsString = ""

            for avgReward in avgRewardPerTask:
                resultsString += str(round(avgReward, 4)) + ",\t"

            resultsFile.write(resultsString)
            resultsFile.write("\t" + str([round(x, 4) for x in holdoutQVals]) + "\n")
            resultsFile.close()

            agent.endEvaluationEpoch(epoch)

    agent.agentCleanup()
Example #23
    def __init__(self):
        # use parameters from Parameters file
        self.pa = Parameters.Parameters()
Example #24
from Parameters import *
from FacialDetector import *
import pdb
from Visualize import *


params: Parameters = Parameters()
params.dim_window = 36                      # positive examples (cropped human faces) are 36x36 pixels
params.dim_hog_cell = 6                     # HOG cell size
params.overlap = 0.3
params.number_positive_examples = 6713      # number of positive examples
params.number_negative_examples = 20000     # number of negative examples
# params.number_negative_examples = 15000   # number of negative examples
params.threshold = 0                        # every window with a score > threshold that is a local maximum becomes a detection
params.has_annotations = True

params.scaling_ratio = 0.9
params.use_hard_mining = False              # (optional) training with hard negative examples
params.use_flip_images = False              # add mirrored face images

#
params.cells_per_block = 2
params.dim_hog_cell = 2
# params.threshold = 1
params.threshold = 1.3
params.use_flip_images = True
params.use_hard_mining = True
# params.use_contrasted_images = True


facial_detector: FacialDetector = FacialDetector(params)
Example #25
    def Process(self):
        if param.ideal_cov():
            self.processIdeal()

        with open(self.Assm_model.fastq_name,'r') as fq, \
            open(os.path.join(self.Assm_view.path.out_dir, param.assembled_read_out()),'w') as oFile:

            ### Parse header (along with the first line) ###
            if self.Assm_model.is_FASTQ:
                flag_keep_reading, position, offset_track, headers = self.Assm_model.ParseModel.parseFQHeaders(
                    fq)
            else:  # If it's not FASTQ, we assume it is an internal format from image-processing
                flag_keep_reading = self.Assm_model.ParseModel.parseCSVHeaders(
                    fq, self.Assm_model.color_encoding_file_name)

            while flag_keep_reading:  # This block iterates per molecule block
                ### allocate molecule_model instance ###
                molecule_model = MoleculeModel(self.Assm_model)

                ### Parse each molecule at a time ###
                if self.Assm_model.is_FASTQ:
                    flag_keep_reading, position, offset_track = self.Assm_model.ParseModel.parseFQFile(
                        fq, molecule_model, position, offset_track)
                else:
                    flag_keep_reading, position = self.Assm_model.ParseModel.parseCSVFile(
                        fq, molecule_model)
                    if self.Assm_model.ParseModel.checkInsufficientSixmers(
                            molecule_model):
                        continue  # Check if we have enough sixmers

                ### Find perfect match and determine the originated gene target
                origin_gene, MTM_ambig = self.determineTargetGene(
                    molecule_model)
                if MTM_ambig == True:
                    self.MTM_removed_cnt += 1
                    continue
                elif origin_gene.startswith(
                        'XXX') and not param.show_XXX_targets():
                    self.XXX_removed_cnt += 1
                    continue
                elif self.Assm_model.ParseModel.checkInsufficientTargets(
                        self.Assm_model, molecule_model, origin_gene):
                    continue  # Check if we have enough candidate targets

                ### Initiate Graph structure ###
                molecule_model.initMutationGraph(
                    origin_gene)  # Charlie's Mutation-Graph-factory

                ### Estimating coverage and Coloring the Graph ###
                self.estimateAllCov(molecule_model, origin_gene)

                ### Update info ###
                self.updateReadCounts(molecule_model, position)

                if param.enable_blind_mu():
                    ### Find mutations and add to mutation Graph ###
                    self.FindMutations(molecule_model, origin_gene)

                    ### Graph trimming ###
                    molecule_model.Graph.GraphTrimming(
                        self.Assm_model.trimming_threshold)

                    ### Call insertions before computing the heaviest/longest path ###
                    molecule_model.Graph.CallInsertions(
                        self.Assm_model.trimming_threshold)

                ### Find maximum scoring path ###
                molecule_model.Graph.greedyQualityPath(0)
                #                molecule_model.Graph.initDynamicPath() # init Dynamic Programming table
                #                molecule_model.Graph.findDynamicPath(0) # Dynamic Programming

                molecule_model.Graph.GraphView.PrintFullReads(
                    molecule_model.Graph,
                    oFile,
                    molecule_model.pos_header + ';3Spotters_cnt:' +
                    str(len(molecule_model.sixmer_list)) +
                    ';gene_uniq_sixmer:' +
                    str(molecule_model.gene_uniq_sixmer) +
                    ';mutation_search_cnt:' +
                    str(len(molecule_model.mutation_search_list)) +
                    ';2Spotters_cnt:' + str(len(molecule_model.sixmer_NN_list))
                    #+ ';mu_bar_cnt:' + str(molecule_model.mutation_barcode_cnt)
                    + ';3Spotters:' + ','.join(molecule_model.sixmer_list) +
                    ';2Spotters:' + ','.join(molecule_model.sixmer_NN_list) +
                    ';Mutation3Spotters:' +
                    ','.join(molecule_model.mutation_search_list),
                    origin_gene)
                #molecule_model.Graph.GraphView.PrintFullReadsDynamicP(molecule_model.Graph, oFile, molecule_model.pos_header, origin_gene)

                ### free molecule_model instance ###
                del molecule_model

            ###  Print all outputs ###
            self.DeltaZScore()
            self.Assm_view.print_outputs(self.Assm_model, self)
            print 'MTM zscore removed:', self.MTM_removed_cnt
            print 'Feature contaminants removed:', self.XXX_removed_cnt
            self.Assm_view.out_message_str += '\nMTM zscore removed: ' + str(
                self.MTM_removed_cnt)
            self.Assm_view.out_message_str += '\nFeature contaminants removed: ' + str(
                self.XXX_removed_cnt)
            with open(param.output_message(), 'w') as outputMessageFile:
                outputMessageFile.write(self.Assm_view.out_message_str)

        if param.debug_output_on():
            self.debugFile.close()
Example #26
def ClassifierComparison(classifier, best_parameters, deal_name_steps,
                         save_path, train_x, train_y, test_x, test_y):

    #Path where the trained model will be saved
    model_save_file = save_path + classifier

    #Classifier types to compare
    classifiers = Parameters.getClassifiers()
    filter1, filter2 = None, None
    for step_name, method in deal_name_steps.items():
        if step_name == 'Imputer':
            #Missing-value imputation
            train_x = Model.changeFeature(train_x, method)
            test_x = Model.changeFeature(test_x, method)
        elif step_name == 'MinMaxScaler':
            #Feature scaling (normalisation)
            train_x = Model.changeFeature(train_x, method)
            test_x = Model.changeFeature(test_x, method)
        elif step_name == 'SelectKBest':
            #Feature selection
            filter1 = Model.getFeature(best_parameters['SelectKBest__k'],
                                       method)
        elif step_name == 'reduce_dim':
            #Dimensionality reduction
            filter2 = Model.getFeature(
                best_parameters['reduce_dim__n_components'], method)
        else:
            print 'The preprocessing step name you entered is invalid!'

    print('******************* %s ********************' % classifier)
    start_time = time.time()
    #Instantiate the classifier
    clf = classifiers[classifier](best_parameters)
    if filter2 != None:
        model = make_pipeline(filter1, filter2, clf)
    else:
        model = make_pipeline(filter1, clf)
    model.fit(train_x, train_y)
    print('training took %fs!' % (time.time() - start_time))
    #Predict
    predict = model.predict(test_x)
    #Evaluation metrics
    precision = metrics.precision_score(test_y, predict)
    recall = metrics.recall_score(test_y, predict)
    print('precision: %.2f%%, recall: %.2f%%' %
          (100 * precision, 100 * recall))
    accuracy = metrics.accuracy_score(test_y, predict)
    print('accuracy: %.2f%%' % (100 * accuracy))

    #Plot the ROC curve and compute the AUC
    proba = model.predict_proba(test_x)
    fpr, tpr, thresholds = roc_curve(test_y, proba[:, 1])
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 500)
    #Interpolate the TPR onto the mean_fpr grid
    mean_tpr += interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr)
    plt.title("ROC curve of %s (AUC = %.4f)" % (classifier, roc_auc))
    plt.show()

    if model_save_file != None:
        pickle.dump(model, open(model_save_file, 'wb'))
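A hypothetical call sketching the expected inputs; only the step names ('Imputer', 'MinMaxScaler', 'SelectKBest', 'reduce_dim') and the best_parameters keys come from the code above, while the classifier key, method identifiers and data splits are placeholders.
deal_name_steps = {
    'Imputer': 'mean',            # placeholder method names understood by Model.changeFeature / Model.getFeature
    'MinMaxScaler': 'minmax',
    'SelectKBest': 'chi2',
    'reduce_dim': 'pca',
}
best_parameters = {'SelectKBest__k': 20, 'reduce_dim__n_components': 10}
ClassifierComparison('SVM', best_parameters, deal_name_steps, './models/',
                     train_x, train_y, test_x, test_y)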
Example #27
    def DeltaZScore(self):
        for j in xrange(0, len(self.sum_zscores)):
            self.sum_zscores[j] = float(self.sum_zscores[j]) / self.cnt_zscores
        self.delta_zscore = self.sum_zscores[-1] - self.sum_zscores[-2]
        self.Assm_view.print_zscore(param.zscore_out(), self.sum_zscores)
Example #28
# This code simulates a conventional SAR system (no range cell migration) without waveform variation.
# A real scene is overlaid with a previously generated real nadir echo signal.
# The SAR raw data for the scene and the nadir echo have to be generated in advance using the code main_raw_data_generation.py
# (for the scene) and main_raw_data_generation_nadir.py (for the nadir echo)
##################################################################
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal.windows import general_hamming
import Parameters
import os
from datetime import datetime

# SIMULATION OF A CONVENTIONAL SAR SYSTEM WITH REAL SAR DATA AND A REALISTIC NADIR ECHO

param = Parameters.Parameters()         # Load constants and parameters from parameters class

# Constants

c0 = param.c                            # speed of light in vacuum [m/s]
R_E = param.R_EARTH                     # Earth's mean radius [m]

# System parameters

lamb = param.wavelength                     # wavelength [m]
h = param.height                            # orbit height [m]
CR = param.cr                               # chirp compression ratio
B = param.chirp_bandwidth                   # chirp bandwidth [Hz]
tau = param.chirp_duration                  # uncompressed pulse length [s]
f_s = param.rg_f_s                          # range sampling frequency [Hz]
PRF = param.PRF                             # pulse repetition frequency [Hz]
Example #29
    def __init__(self, path_inst):
        self.path = path_inst
        log_file_name = os.path.join(self.path.parent_dir, param.log_message())
        logging.basicConfig(level=logging.DEBUG, filename=log_file_name, filemode='a+',
                            format='%(asctime)-15s %(levelname)-8s %(message)s')
        self.out_message_str = ''
Example #30
if __name__ == "__main__":
    if len(sys.argv) == 7:
        ref_fa_file_name = sys.argv[1]
        fasta_database_name = sys.argv[2]
        location_file_name = sys.argv[3]
        ms_result_file_name = sys.argv[4]
        ms_pep_col = int(sys.argv[5])
        header_start = sys.argv[6]
    elif len(sys.argv) == 1:
        ref_fa_file_name = ''
        fasta_database_name = '/UNCID_1582766.974b1fa9-8ee7-4f02-b0ff-221fc98abe5f.sorted_genome_alignments20150521-950-hevqjo_IGH_AND_UNMAPPED_splicegraph_1.fa'
        location_file_name = '/IG_VU_DB/temp_location.txt'
        ms_result_file_name = '/IG_VU_DB/VU_IG_DB_result_SPEC01.p'
        ms_pep_col = 9
        header_start = 'PEP'
    else:
        ref_fa_file_name = param.ref_fa_file_name()
        fasta_database_name = param.fasta_out_file()
        location_file_name = param.location_file()
        ms_result_file_name = param.ms_result_file()
        ms_pep_col = param.ms_pep_col()
        header_start = param.header_start()

    master_seq = formats.parseRefProtFASTA(ref_fa_file_name)
    loc_obj = GenomicLocation(master_seq, ref_fa_file_name,
                              fasta_database_name, location_file_name,
                              ms_result_file_name, ms_pep_col, header_start)
    loc_obj.process()
    del loc_obj
Example #31
import os
from threading import Thread
import swmmio
import shutil
# import the necessary modules from pyswmm
import pyswmm
from pyswmm import Simulation, Nodes, Links, SystemStats, Subcatchments
import swmmtoolbox.swmmtoolbox as swmmtoolbox
import pdb
import Parameters

if __name__ == "__main__":
    os.chdir(
        r'C:\Users\magnu\OneDrive\DTU\NOAH_project_local\github\NOAH\NOAH_RTC_Tool\lib'
    )
    inp = Parameters.Read_Config_Parameters('Astlingen.ini')
    os.chdir(
        r'C:\Users\magnu\OneDrive\DTU\NOAH_project_local\github\NOAH\NOAH_RTC_Tool\saved_output\2020-03-13_15-04-17_Bellinge pump optimization_events'
    )
    os.chdir(
        r'C:\Users\magnu\OneDrive\DTU\NOAH_project_local\2020-01-08_22-21-46')

    # CSO_ids = ['22065','14','5619']
    CSO_ids = ['T4', 'T3', 'T5']
    model_outfile = 'Astlingen.out'


def Compute_CSO_statistics(inp,
                           CSO_ids,
                           model_outfile,
                           allow_CSO_different_locations=True):
Example #32
def main_script(config_file):
    # All input data is found in the configuration file

    # All variables are stored as inp._VarName_ and can be used in functions.
    inp = Parameters.Read_Config_Parameters(config_file)

    # Rewriting file if timeseries are not in the right folder
    model_inp = SWMMControlFunction.redefine_time_series(inp)
    print(model_inp)
    # Reads input from the .inp file.
    basins = [x.strip(' ') for x in inp.Control_basins.split(',')]
    actuators = [x.strip(' ') for x in inp.Control_actuators.split(',')]
    depths = [float(x.strip(' ')) for x in inp.basin_depths.split(',')]
    depths_df = pd.DataFrame(depths, index=basins)  # written into a dataframe
    CSO_ids = [x.strip(' ') for x in inp.Control_CSO_id.split(',')]
    no_layers = inp.no_layers

    # creating directory for the two output files. Only the final files are saved.
    # These are stored in /NOAH_RTC_Tool/output/_Timestamp_
    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    os.mkdir('../output/' + timestamp)
    filename = '_Control'
    model_rptfile = '../output/' + timestamp + '/' + inp.model_name + filename + '.rpt'
    model_outfile = '../output/' + timestamp + '/' + inp.model_name + filename + '.out'
    # new file is the file that the control is written to and that SWMM should run
    new_file = str(
        model_inp.split('.')[0] + filename + '.' + model_inp.split('.')[1])

    # The basin structure is defined
    if 'Astlingen' in inp.model_name:
        basin_structure = SWMMControlFunction.create_basin_structure(basins)
    else:
        print(
            'Basin structure is not implemented for this model. Define it manually in the function create_basin_structure() in SWMMControlFunction.py'
        )

    # Creating pairs of basins from the basins_structure dataframe
    basin_pairs = []
    for ds_basin in basins:
        for ups_basin in basins:
            if basin_structure[ups_basin][ds_basin] == 1:
                basin_pairs.append([ups_basin, ds_basin])

    if inp.Control_setup_type == 'default':  # if default setups are tried out
        no_layers = 3

        # The dataframe with the Control setup (i.e. orifice settings and thresholds between zones) that should be written to SWMM is created.
        Input_df = SWMMControlFunction.create_default_input_df(
            basins, depths_df, no_layers, inp.Default_setup_selection)

        # A SWMM file is written with the selected rules
        SWMMControlFunction.Control_to_SWMM(Input_df, inp, timestamp,
                                            model_inp, new_file,
                                            basin_structure, basins, actuators,
                                            depths_df, no_layers)

        # Run the SWMM simulation
        SWMMControlFunction.run_SWMM(new_file, model_rptfile, model_outfile)

        CSO_stats_df = CSO_statistics.Compute_CSO_statistics(
            inp, CSO_ids, model_outfile)
        print(CSO_stats_df.head())
        print(CSO_stats_df['Volume'].sum())

    elif inp.Control_setup_type == 'optimized':

        print('optimizing...')

        # =============================================================================
        # Optimization routine
        # The optimization routine can be run here or called in a separate function
        # =============================================================================

        # Running optimizer for all metaparameters at once
        # start_value = [1,1,1,1,1,5,5,5,5,5]
        # result = optimize.minimize(fun = optimizer_wrapper_all,
        #                   args = (inp,timestamp,new_file,
        #                   basin_structure,basin_pairs,basins,actuators,depths_df,no_layers,CSO_ids,
        #                   model_inp,model_rptfile,model_outfile),
        #                   x0 = start_value, method='Nelder-Mead',
        #                   options = {'disp':True})

        # Running optimizer for only one set of meta parameters that are identical for all basins (i.e. 1 alpha and 1 beta)
        start_value = [0, 5]  # [alpha,beta]
        result_one_set = optimize.minimize(
            fun=optimizer_wrapper_one_set,
            args=(inp, timestamp, new_file, basin_structure, basin_pairs,
                  basins, actuators, depths_df, no_layers, CSO_ids, model_inp,
                  model_rptfile, model_outfile),
            x0=start_value,
            method='Nelder-Mead',
            options={'disp': True})
        print('Result of the one-set optimizer: ', result_one_set.x)

        # Running optimizer for only alpha
        start_value = np.repeat(result_one_set.x[0], len(basin_pairs))
        betas = np.repeat(result_one_set.x[1], len(basin_pairs))

        result_alpha = optimize.minimize(
            fun=optimizer_wrapper_alpha,
            args=(betas, inp, timestamp, new_file, basin_structure,
                  basin_pairs, basins, actuators, depths_df, no_layers,
                  CSO_ids, model_inp, model_rptfile, model_outfile),
            x0=start_value,
            method='Nelder-Mead',
            options={'disp': True})

        print('Results of the alpha optimizer: ', result_alpha.x)

        # Running optimizer for only Beta
        start_value = np.repeat(result_one_set.x[1], len(basin_pairs))
        alphas = result_alpha.x

        result_beta = optimize.minimize(
            fun=optimizer_wrapper_beta,
            args=(alphas, inp, timestamp, new_file, basin_structure,
                  basin_pairs, basins, actuators, depths_df, no_layers,
                  CSO_ids, model_inp, model_rptfile, model_outfile),
            x0=start_value,
            method='Nelder-Mead',
            options={'disp': True})

        print('Results of the beta optimizer: ', result_beta.x)
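
For reference, scipy's optimize.minimize with Nelder-Mead calls the objective as fun(x, *args) and expects a single scalar back, so each wrapper used above has to rebuild the control setup from the candidate vector, run SWMM, and return one cost value. The following is only a minimal sketch of what optimizer_wrapper_one_set could look like under that convention; build_control_input_df is a hypothetical helper standing in for the real control-setup builder (not shown in this excerpt), while the other calls mirror the 'default' branch above.

def optimizer_wrapper_one_set(x, inp, timestamp, new_file, basin_structure,
                              basin_pairs, basins, actuators, depths_df,
                              no_layers, CSO_ids, model_inp, model_rptfile,
                              model_outfile):
    # Nelder-Mead proposes one shared (alpha, beta) pair per evaluation.
    alpha, beta = x
    # Hypothetical helper: turn alpha/beta into the Input_df expected by Control_to_SWMM.
    Input_df = build_control_input_df(basins, depths_df, no_layers, alpha, beta)
    SWMMControlFunction.Control_to_SWMM(Input_df, inp, timestamp, model_inp,
                                        new_file, basin_structure, basins,
                                        actuators, depths_df, no_layers)
    SWMMControlFunction.run_SWMM(new_file, model_rptfile, model_outfile)
    CSO_stats_df = CSO_statistics.Compute_CSO_statistics(inp, CSO_ids, model_outfile)
    return CSO_stats_df['Volume'].sum()  # scalar objective: total CSO volume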
Example #33
0
def main(path_continue_learning=None, total_epoch=0, new_name=None):
    """
    :param path_continue_learning: Path where the network is already saved
                                   (don't use it at the beginning of the training).
    :param total_epoch: Total number of epochs needed (don't use it at the beginning of the training).
    :param new_name: New name of the network, if we want to reuse a network that was already trained.
    :return: Nothing, but trains the network, saves CSV files with the errors, and saves the network regularly.
    """
    # Manually seed the network to get reproducible experiments
    torch.manual_seed(26542461)

    # If the network was already trained, we import it
    if path_continue_learning is not None:
        # Load the trained Network
        parameters, network = Save_import.load_from_checkpoint(path_checkpoint=path_continue_learning)

        # Here we can change some parameters, the only one necessary is the total_epoch
        parameters.epoch_total = total_epoch
        # parameters.learning_rate_decay = - 4.5 * 10 ** (-5)
        # parameters.batch_size = 4
        # parameters.batch_size_val = 4
        # parameters.learning_rate = 0.01
        # parameters.momentum_IoU = 0
        # parameters.loss = "IoU_Lovasz"

        # Move the weights to the GPU
        if torch.cuda.is_available():
            parameters.weight_grad = parameters.weight_grad.cuda()

        # If a new name is defined, we create the associated new CSV files and change the name of the network
        if new_name is not None:
            # Init the CSV file that will store the errors; this time we make a copy of the existing error file
            Save_import.duplicated_csv(path_CSV=parameters.path_CSV,
                                       old_name_network=parameters.name_network,
                                       new_name_network=new_name,
                                       train_number=parameters.train_number)
            parameters.name_network = new_name

        with open(parameters.path_print, 'w') as txtfile:
            txtfile.write('\n               The program will continue \n')

    # If the network was not trained, we start from scratch
    else:
        # Define the weight
        weight_grad = torch.FloatTensor([2.381681e+09, 3.856594e+08, 1.461642e+09, 4.291781e+07,
                                         5.597591e+07, 8.135516e+07, 1.328548e+07, 3.654657e+07,
                                         1.038652e+09, 7.157456e+07, 2.527450e+08, 7.923985e+07,
                                         9.438758e+06, 4.460595e+08, 1.753254e+07, 1.655341e+07,
                                         1.389560e+07, 6.178567e+06, 2.936571e+07])

        sum_grad = weight_grad.sum()
        # normalize and then take the inverse
        for i in range(weight_grad.size(0)):
            weight_grad[i] = sum_grad / weight_grad[i]
        # Normalize again and multiply by the number of classes
        weight_grad = (weight_grad / weight_grad.sum()) * weight_grad.size(0)

        # if you want to keep the weights computed above, comment out the next line
        weight_grad = torch.FloatTensor([1 for i in range(19)])

        # Define all the parameters
        parameters = Parameters.Parameters(nColumns=8,
                                           nFeatMaps=[16, 32, 64, 128, 256],
                                           nFeatureMaps_init=3,
                                           number_classes=20 - 1,
                                           label_DF=Label.create_label(),

                                           width_image_initial=2048, height_image_initial=1024,
                                           size_image_crop=401,

                                           dropFactor=0.1,
                                           learning_rate=0.01,
                                           learning_rate_decay=1 * (10 ** (-2)),
                                           weight_decay=0,
                                           beta1=0.9,
                                           beta2=0.999,
                                           epsilon=1 * 10 ** (-8),
                                           batch_size=5,
                                           batch_size_val=5,
                                           epoch_total=400,
                                           actual_epoch=0,
                                           ratio=(1, 1),
                                           weight_grad=weight_grad,
                                           loss="focal_loss",
                                           momentum_IoU=0,

                                           path_save_net="./Model/",
                                           name_network="focal_loss2",
                                           train_number=0,
                                           path_CSV="./CSV/",
                                           path_data="/home_expes/collections/Cityscapes/",
                                           path_print="./Python_print_focal_loss.txt",
                                           path_result="./Result",
                                           num_workers=2)
        # Define the GridNet
        network = GridNet_structure.gridNet(nInputs=parameters.nFeatureMaps_init,
                                            nOutputs=parameters.number_classes,
                                            nColumns=parameters.nColumns,
                                            nFeatMaps=parameters.nFeatMaps,
                                            dropFactor=parameters.dropFactor)

        with open(parameters.path_print, 'w') as txtfile:
            txtfile.write('\n               Start of the program \n')

        # Init the csv file that will store the error
        Save_import.init_csv(name_network=parameters.name_network,
                             train_number=parameters.train_number,
                             path_CSV=parameters.path_CSV,
                             path_print=parameters.path_print)

    # Import both DataSets with the transformation
    train_dataset = Save_import.cityscapes_create_dataset(quality='fine',
                                                          mode='train',
                                                          transform=parameters.transforms_input,
                                                          transform_target=parameters.transforms_output,
                                                          parameters=parameters)

    val_dataset = Save_import.cityscapes_create_dataset(quality='fine',
                                                        mode='val',
                                                        transform=parameters.transforms_input,
                                                        transform_target=parameters.transforms_output,
                                                        parameters=parameters)

    # Create the PyTorch DataLoaders
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=parameters.batch_size,
                                               shuffle=True,
                                               num_workers=parameters.num_workers,
                                               drop_last=False)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=parameters.batch_size_val,
                                             shuffle=True,
                                             num_workers=parameters.num_workers,
                                             drop_last=False)

    # If there is more than one GPU we can use them
    if torch.cuda.device_count() > 1:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nLet's use " + str(torch.cuda.device_count()) + " GPUs! \n")
        network = torch.nn.DataParallel(network)
    else:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nWe don t have more than one GPU \n")
        # ... But we still use it in this case ? ... TODO try without to check if it is working
        network = torch.nn.DataParallel(network)

    # Put the network on GPU if possible
    if torch.cuda.is_available():
        network.cuda()
    else:
        with open(parameters.path_print, 'a') as txtfile:
            txtfile.write("\nAccording to torch Cuda is not available \n")

    # Train the network
    train(network=network,
          parameters=parameters,
          train_loader=train_loader,
          val_loader=val_loader)
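
A hedged usage sketch based on the docstring above; the checkpoint path and the new name below are made up for illustration.

# Start a fresh training run from scratch
main()

# Resume a saved run (hypothetical checkpoint path) and extend it to 600 epochs in total
main(path_continue_learning="./Model/focal_loss2_checkpoint.pth", total_epoch=600)

# Resume the same checkpoint but log errors under a new network name
main(path_continue_learning="./Model/focal_loss2_checkpoint.pth", total_epoch=600,
     new_name="focal_loss2_finetune")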
Example #34
0
    for i, img in enumerate(MS_train_n):
        img_X0[i] = misc.imread(
            os.path.join(sample_dir, 'train/MS_negative/', img))

    label = np.zeros((n1 + n0, 2))
    label[0:n1, 1] = 1
    label[n1:(n1 + n0), 0] = 1

    j = range(n1 + n0)
    random.shuffle(j)
    X = np.concatenate((img_X1, img_X0))
    return X[j], label[j]


if __name__ == '__main__':
    evaluate_only, external_test, tr_n1, tr_n0, tr_b, tr_e, tr_t, te_n, nn = Parameters.deal_args(
        sys.argv[1:])

    print '--------------- Read Samples ---------------'
    img_X, Y = read_train_sample(tr_n1, tr_n0)

    m = NN_Model.Model(img_X, Y, nn + '_DL_OSMMS')
    if not evaluate_only:
        print '--------------- Training on OSM Labels---------------'
        m.set_batch_size(tr_b)
        m.set_epoch_num(tr_e)
        m.set_thread_num(tr_t)
        m.train(nn)
        print '--------------- Evaluation on Training Samples ---------------'
        m.evaluate()
    del img_X, Y
    gc.collect()
Example #35
0
 def updateBtn(self):  #call update button function
     parUpt = Parameters()
     parUpt.LowLImVal = self.login_widget.lineEdit_Lower_limit_value.value()
     parUpt.UpLImVal = self.login_widget.lineEdit_Upper_limit_value.value()
     self.recive_update_singal.emit(parUpt)
Example #36
0

if rank == 0:

    # start a timer
    start_time = timer()

    # print the herald 
    FileIO.print_herald(n_ranks)

    # get input opts from file
    parser = Parser.parser()
    parser.parse(input_file)

    # initialize params obj
    params = Parameters.params(parser)
    params.rank_0_init()

    # get the total set of Q's 
    total_Qsteps = params.total_Qsteps
    total_reduced_Q = params.total_reduced_Q

    # split the total set of Q's over the procs
    reduced_Q_set = prepare_Qpoints(params,n_ranks)

    # pass the sets of Q's and params object to each proc.
    for ii in range(1,n_ranks):
        comm.send(reduced_Q_set[ii],dest=ii,tag=0)
        comm.send(params,dest=ii,tag=1)

    # initialize params on rank 0 using the set of Q's
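
The matching worker-rank side is not part of this excerpt; a minimal sketch of what it might look like with mpi4py, assuming the workers use the same tags as the comm.send calls above.

if rank != 0:
    # Worker ranks receive their share of Q-points and the shared params object.
    reduced_Q_set = comm.recv(source=0, tag=0)
    params = comm.recv(source=0, tag=1)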
Example #37
0
def run_experiment(args):
    parameters = Parameters.processArguments(args, __doc__)

    # if the nnFile is a directory, check for a previous experiment run in it and start from there:
    # load its parameters, append to its evalresults file, open its largest network file.
    # If it's None, create an experiment directory: create a results file, save parameters, and save network files there.

    experimentDirectory = parameters.rom + "_" + time.strftime(
        "%d-%m-%Y-%H-%M") + "/"
    resultsFileName = experimentDirectory + "results.csv"
    startingEpoch = 1
    if parameters.nnFile is None or parameters.nnFile.endswith(".pkl"):
        #Create your experiment directory, results file, save parameters
        if not os.path.isdir(experimentDirectory):
            os.mkdir(experimentDirectory)

        resultsFile = open(resultsFileName, "a")
        resultsFile.write("Epoch,\tAverageReward,\tMean Q Value\n")
        resultsFile.close()

        parametersFile = open(experimentDirectory + "parameters.pkl", 'wb', -1)
        cPickle.dump(parameters, parametersFile)
        parametersFile.close()

    if parameters.nnFile is not None and os.path.isdir(parameters.nnFile):
        # Found an experiment directory
        if not parameters.nnFile.endswith("/"):
            parameters.nnFile += "/"

        experimentDirectory = parameters.nnFile
        resultsFileName = experimentDirectory + "results.csv"

        if os.path.exists(experimentDirectory + "parameters.pkl"):
            parametersFile = open(experimentDirectory + "parameters.pkl", 'rb')
            parameters = cPickle.load(parametersFile)
            parametersFile.close()
        else:
            parametersFile = open(experimentDirectory + "parameters.pkl", 'wb',
                                  -1)
            cPickle.dump(parameters, parametersFile)
            parametersFile.close()

        contents = os.listdir(experimentDirectory)
        networkFiles = []
        for handle in contents:
            if handle.startswith("network") and handle.endswith(".pkl"):
                networkFiles.append(handle)

        if len(networkFiles) == 0:
            # Found a premature experiment that didn't finish a single training epoch
            parameters.nnFile = None
        else:
            # Found a previous experiment's network files; now find the highest epoch number
            highestNNFile = networkFiles[0]
            highestNetworkEpochNumber = int(
                highestNNFile[highestNNFile.index("_") +
                              1:highestNNFile.index(".")])
            for networkFile in networkFiles:
                networkEpochNumber = int(networkFile[networkFile.index("_") +
                                                     1:networkFile.index(".")])
                if networkEpochNumber > highestNetworkEpochNumber:
                    highestNNFile = networkFile
                    highestNetworkEpochNumber = networkEpochNumber

            startingEpoch = highestNetworkEpochNumber + 1
            # don't use full exploration; it's not a good way to fill the replay memory when we already have a decent policy
            if startingEpoch > 1:
                parameters.epsilonStart = parameters.epsilonEnd

            parameters.nnFile = experimentDirectory + highestNNFile
            print "Loaded experiment: " + experimentDirectory + "\nLoaded network file:" + highestNNFile

    sys.setrecursionlimit(10000)
    ale = ALEInterface()

    Environment.initializeALEParameters(ale, parameters.seed,
                                        parameters.frameSkip,
                                        parameters.repeatActionProbability,
                                        parameters.displayScreen)
    ale.loadROM(parameters.fullRomPath)
    minimalActions = ale.getMinimalActionSet()

    agent = DQNAgent.DQNAgent(
        minimalActions, parameters.croppedHeight, parameters.croppedWidth,
        parameters.batchSize, parameters.phiLength, parameters.nnFile,
        parameters.loadWeightsFlipped, parameters.updateFrequency,
        parameters.replayMemorySize, parameters.replayStartSize,
        parameters.networkType, parameters.updateRule,
        parameters.batchAccumulator, parameters.networkUpdateDelay,
        parameters.discountRate, parameters.learningRate, parameters.rmsRho,
        parameters.rmsEpsilon, parameters.momentum, parameters.epsilonStart,
        parameters.epsilonEnd, parameters.epsilonDecaySteps,
        parameters.evalEpsilon, parameters.useSARSAUpdate,
        parameters.kReturnLength)

    for epoch in xrange(startingEpoch, parameters.epochs + 1):
        agent.startTrainingEpoch(epoch)
        runTrainingEpoch(ale, agent, epoch, parameters.stepsPerEpoch)
        agent.endTrainingEpoch(epoch)

        networkFileName = experimentDirectory + "network_" + str(
            epoch) + ".pkl"
        DeepNetworks.saveNetworkParams(agent.network.qValueNetwork,
                                       networkFileName)

        if parameters.stepsPerTest > 0 and epoch % parameters.evaluationFrequency == 0:
            agent.startEvaluationEpoch(epoch)
            avgReward = runEvaluationEpoch(ale, agent, epoch,
                                           parameters.stepsPerTest)
            holdoutQVals = agent.computeHoldoutQValues(3200)

            resultsFile = open(resultsFileName, 'a')
            resultsFile.write(
                str(epoch) + ",\t" + str(round(avgReward, 4)) + ",\t\t" +
                str(round(holdoutQVals, 4)) + "\n")
            resultsFile.close()

            agent.endEvaluationEpoch(epoch)

    agent.agentCleanup()
Example #38
0
# plt.rc('text', usetex=True)
# plt.rc('xtick',labelsize=14)
# plt.rc('ytick',labelsize=14)
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-large')
plt.rc('ytick', labelsize='x-large')
plt.style.use('classic')
# plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.rc('axes', labelsize=18)
plt.rc('legend', fontsize=13)
model_param_filepath = '../Input_Params/input_params.yml'
debug_flag = False
verbosity = 1
time_code = True
model_params = Parameters.ModelParams(model_param_filepath, verbosity,
                                      debug_flag)
model_params.reionize_model = 0
model_params.load_paramters_to_C()
model_params.read_in_data_tables_from_c()

if model_params.reionize_model == 0:
    title = 'Okamoto et al (2008)'

    # masses = np.arange(10**8, 10**11, 10**8)

    masses = np.logspace(np.log10(10**9), np.log10(10**11), 1000)

    redshifts = np.arange(0, 10, 0.01)

    extent = [10**9, 10**11, 0, 10]
Example #39
0
import time
import numpy as np
from Desired import *
from Jacobian import *
from Integral import *
from Parameters import *
from tkinter import *
from tkinter import ttk

D = Desired()
FK = ForwardKinematics()
J = Jacobian()
I = Integral()
P = Parameters()


class InverseKinematics:
    @staticmethod
    def compute(q=np.zeros(P.links().shape[0]),
                p=np.zeros((3, 1)),  # p, o, a are 3-by-n arrays, one target per column;
                o=np.zeros((3, 1)),  # 2-D defaults keep p.shape[1] and p[:, k] below valid
                a=np.zeros((3, 1)),
                s='d',
                f='p'):
        m = q.shape[0]
        n = p.shape[1]
        j = np.zeros((m, n))
        for k in range(0, n):
            d = D.quaternion(p[:, k], o[:, k], a[:, k])
            j[:, k], z = InverseKinematics.algorithm(q, d, s, f)
            j[:, k] = InverseKinematics.convert(j[:, k])
        return j
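
A hedged usage sketch: the p.shape[1] and p[:, k] indexing above implies that positions, orientations, and approach vectors are passed as 3-by-n arrays with one target per column, so a call might look like this (all values here are placeholders).

q0 = np.zeros(P.links().shape[0])   # initial joint configuration of length m
positions = np.zeros((3, 5))        # five target positions, one per column
orientations = np.zeros((3, 5))
approaches = np.zeros((3, 5))

# Returns an m-by-5 array: one joint-space solution per target column.
q_traj = InverseKinematics.compute(q=q0, p=positions, o=orientations,
                                   a=approaches, s='d', f='p')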
Example #40
0
import os, sys
import pandas as pd
from memory_profiler import profile
from RIPSMasterScript import *
import Parameters
sys.stdout.flush()


@profile
def runCEmain(cwd=os.getcwd(), case=None, runUC=False):

    print('Loading parameters and setting up initial data')
    # Load parameters
    genparam = Parameters.Generalparameters()
    genparam.load(fname=os.path.join(cwd, 'generalparameters.txt'))

    reserveparam = Parameters.Reserveparameters()
    reserveparam.load(fname=os.path.join(cwd, 'reserveparameters.txt'))

    curtailparam = Parameters.Curtailmentparameters()
    curtailparam.load(fname=os.path.join(cwd, 'curtailmentparameters.txt'))

    if case is not None:

        case = int(case) - 1

        df = pd.read_csv(os.path.join(cwd, 'list_cases.csv'),
                         comment='#',
                         sep=',',
                         skipinitialspace=True,
                         dtype={
Example #41
0
 def __init__(self, pFile):
     self.pDict = Parameters.readParameters(pFile)
Example #42
0
def dmrg_finite_size(para=None):
    from MPSClass import MpsOpenBoundaryClass as Mob
    t_start = time.time()
    info = dict()
    print('Preparing the parameters and MPS')
    if para is None:
        para = pm.generate_parameters_dmrg()
    # Initialize MPS
    if is_parallel:
        par_pool = ThreadPool(n_nodes)
    else:
        par_pool = None

    A = Mob(length=para['l'],
            d=para['d'],
            chi=para['chi'],
            way='qr',
            ini_way='r',
            operators=para['op'],
            debug=is_debug,
            is_parallel=is_parallel,
            par_pool=par_pool,
            is_save_op=is_save_op)
    A.correct_orthogonal_center(para['ob_position'])
    print('Starting to sweep ...')
    e0_total = 0
    info['convergence'] = 1
    ob = dict()
    for t in range(1, para['sweep_time'] + 1):
        if_ob = ((t % para['dt_ob']) == 0) or t == (para['sweep_time'] - 1)
        if if_ob:
            print('In the %d-th round of sweep ...' % t)
        for n in range(para['ob_position'] + 1, para['l']):
            if para['if_print_detail']:
                print('update the %d-th tensor from left to right...' % n)
            A.update_tensor_eigs(n,
                                 para['index1'],
                                 para['index2'],
                                 para['coeff1'],
                                 para['coeff2'],
                                 para['tau'],
                                 para['is_real'],
                                 tol=para['eigs_tol'])
        for n in range(para['l'] - 2, -1, -1):
            if para['if_print_detail']:
                print('update the %d-th tensor from right to left...' % n)
            A.update_tensor_eigs(n,
                                 para['index1'],
                                 para['index2'],
                                 para['coeff1'],
                                 para['coeff2'],
                                 para['tau'],
                                 para['is_real'],
                                 tol=para['eigs_tol'])
        for n in range(1, para['ob_position']):
            if para['if_print_detail']:
                print('update the %d-th tensor from left to right...' % n)
            A.update_tensor_eigs(n,
                                 para['index1'],
                                 para['index2'],
                                 para['coeff1'],
                                 para['coeff2'],
                                 para['tau'],
                                 para['is_real'],
                                 tol=para['eigs_tol'])

        if if_ob:
            ob['eb_full'] = A.observe_bond_energy(para['index2'],
                                                  para['coeff2'])
            ob['mx'] = A.observe_magnetization(1)
            ob['mz'] = A.observe_magnetization(3)
            if para['lattice'] in ('square', 'chain'):
                ob['e_per_site'] = (sum(ob['eb_full']) -
                                    para['hx'] * sum(ob['mx']) -
                                    para['hz'] * sum(ob['mz'])) / A.length
            else:
                ob['e_per_site'] = sum(ob['eb_full'])
                for n in range(0, para['coeff1'].shape[0]):
                    if para['index1'][n, 1] == 1:
                        ob['e_per_site'] += para['coeff1'][n] * ob['mx'][n]
                    elif para['index1'][n, 1] == 3:
                        ob['e_per_site'] += para['coeff1'][n] * ob['mz'][n]
                ob['e_per_site'] /= A.length
            info['convergence'] = abs(ob['e_per_site'] - e0_total)
            if info['convergence'] < para['break_tol']:
                print(
                    'Converged at the %d-th sweep with error = %g of energy per site.'
                    % (t, info['convergence']))
                break
            else:
                print('Convergence error of energy per site = %g' %
                      info['convergence'])
                e0_total = ob['e_per_site']
        if t == para['sweep_time'] - 1 and info['convergence'] > para[
                'break_tol']:
            print('Not converged with error = %g of eb per bond' %
                  info['convergence'])
            print('Consider increasing para[\'sweep_time\']')
    ob['eb'] = get_bond_energies(ob['eb_full'], para['positions_h2'],
                                 para['index2'])
    A.calculate_entanglement_spectrum()
    A.calculate_entanglement_entropy()
    info['t_cost'] = time.time() - t_start
    print('Simulation finished in %g seconds' % info['t_cost'])
    A.clean_to_save()
    if A._is_parallel:
        par_pool.close()
    return ob, A, info, para
Example #43
0
    def dynamic(self):
        timestep = self.currentTimeStep()
        if timestep in [15, 25]:
            for aType in LUtypes:
                subDict = {}
                summap = self.nullMask
                for aSample in samples:
                    path = os.path.join(str(aSample), 'landUse')
                    landuse = self.readmap(path)
                    thisLU = scalar(landuse == aType)
                    weight = self.mapping.get(aSample)
                    summap = summap + (thisLU * float(weight))
                total = sum(list(self.mapping.values())[0:nr_samples])
                ##print(total)
                averageMap = summap / float(total)
                path2 = os.path.join(
                    'results_stoch',
                    'sc' + str(ParametersProjection.getScenario()),
                    str(aType) + 'L')  # Renan: I added the "sc" folder here
                self.report(averageMap, path2)


nr_samples = ParametersProjection.getNrSamplesFromFile('particle_mapping.csv')
samples = range(1, nr_samples + 1)
LUtypes = range(1, 12)
nrOfTimeSteps = Parameters.getNrTimesteps()
myModel = CheckModel()
dynamicModel = DynamicFramework(myModel, nrOfTimeSteps)
dynamicModel.run()
Example #44
0
    for c in chr_list:
        chrQuery = {'chr': c}
        updateDict = {'CHR': ''.join(['chr', c])}
        setDict = {'$set': updateDict}
        print 'Adding chr prefix for CHR fields containing %s' % (c)
        mongo.update(collection_name, chrQuery, setDict)

    distinct = mongo.distinct(collection_name, 'chr')
    print 'chromosomes updated to:'
    print distinct

    print 'Done in %i seconds' % (time.time() - starttime)



if __name__ == "__main__":
    annotation_name = 'annotations'

    p = Parameters.parameter()
    mongo = Mongo_Connector.MongoConnector(p.get('server'), p.get('port'), p.get('default_database'))
    # mongo.ensure_index(annotation_name, 'chr')  # don't index just on chromosome name

    chromosome_list = [str(i) for i in range(1, 22)]    # Create an expected list of chromosomes
    chromosome_list.append('X')
    chromosome_list.append('Y')
    print 'Adding prefix chr to chromosome field in methyl450 arrays...'
    AddChrPrefix(annotation_name, chromosome_list)
    mongo.close()


Example #45
0
 def initial(self):
     self.nullMask = self.readmap('nullMask')
     self.startTime = Parameters.getFirstTimeStep()
     self.mapping = ParametersProjection.getMappingFromFile('particle_mapping.csv',\
                                                     'New_ID', 'Nr_particles_weight')