Example #1
File: game.py Project: pom2ter/immortal
	def __init__(self):
		global debug, font_width, font_height, con, panel, ps, fov_noise, savefiles, baseitems, prefix, suffix, tiles, monsters
		IO.load_settings()
		debug = dbg.Debug()
		debug.enable = True
		for key, value in fonts.items():
			if setting_font == key:
				libtcod.console_set_custom_font(value['file'], libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_ASCII_INROW)
				font_width = value['width']
				font_height = value['height']
		self.init_root_console()
		#libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'Immortal ' + VERSION, False)

		con = libtcod.console_new(MAP_WIDTH, MAP_HEIGHT)
		panel = libtcod.console_new(MESSAGE_WIDTH, MESSAGE_HEIGHT)
		ps = libtcod.console_new(PLAYER_STATS_WIDTH, PLAYER_STATS_HEIGHT)
		fov_noise = libtcod.noise_new(1, 1.0, 1.0)
		savefiles = [f for f in os.listdir('saves') if os.path.isfile(os.path.join('saves', f))]
		IO.load_high_scores()
		baseitems = BaseItemList()
		baseitems.init_parser()
		prefix = PrefixList()
		prefix.init_parser()
		suffix = SuffixList()
		suffix.init_parser()
		tiles = mapgen.TileList()
		tiles.init_parser()
		monsters = MonsterList()
		monsters.init_parser()
		self.main_menu()
Example #2
File: MACD.py Project: Esmidth/TushareMod
def testAll(date):
	path = '/Users/Esmidth/Documents/Github/TushareMod/DataBase' + date.__str__() + '/'
	files = os.listdir(path)
	dic = {}
	profits = []
	idd = 1
	# lenth = len(sh.DataBase20151106)
	length = len(files)
	'''
	for x in sh.DataBase20151106:
		profit = MACDMethod(IO.load(path + x + '.xlsx')) * 100
		dic[profit] = x
		vals.append(profit)
		print("%.2f%%  %s Done\t Profit: %s%%" % (100 * i / lenth, x, profit))
		i += 1
	 '''

	for file in files:
		profit = MACDMethod(IO.load(path + file)) * 100
		dic[profit] = file
		profits.append(profit)
		print("%.2f%%  %s Done\t Profit: %s%%" % (100 * idd / length, file, profit))
		idd += 1
	profits = sorted(profits)
	profits.reverse()
	idd = 1
	for profit in profits:
		print("#%s\t%s:\t%.2f%%" % (idd, dic[profit], profit))
		idd += 1
	IO.outputToExcel('2016_04_12', dic, profits)
Example #3
File: map.py Project: hr203/c2raytools
def map_temp():
    for i in range(start, len(redshifts)):
        filename = setup_dirs.resultsdir() + "map_temper_" + str("%.3f" % redshifts[i]) + ".dat"
        temp_filename = setup_dirs.path() + "Temper3D_" + str("%.3f" % redshifts[i]) + ".bin"
        tfile = c2t.TemperFile(temp_filename)
        IO.writemap(tfile.temper, filename)
    print "Writen map to " + filename
Example #4
File: EquSys.py Project: jksr/beta2
	def __init__(self,ballfn,stickfn):
		self.orientdict = {} # stick orientation dict
		self.typedict = {} # stick type dict

		balldat = IO.read_balls(ballfn)
		stickdat = IO.read_sticks(stickfn)
		ball1s = stickdat['BALL1']
		ball2s = stickdat['BALL2']
		types = stickdat['TYPE']
		# loop over sticks
		for i in range(len(stickdat)):
			# make sure 1st endball id always less than or eq to 2nd endball id
			id1 = min(ball1s[i], ball2s[i])
			id2 = max(ball1s[i], ball2s[i])
			self.typedict[(id1,id2)] = types[i]
			coord1 = balldat[id1].coords # ball1 coord
			coord2 = balldat[id2].coords # ball2 coord
			stick = coord1 - coord2 # stick vector
			so = stick / np.linalg.norm(stick) # stick orientation
			so = so.reshape((1,3))
			# calculate sblock
			# sblock(ijpair)=| ll lm ln |
			#				| lm mm mn |
			#				| ln mn nn |
			self.orientdict[id1, id2] = np.dot(so.T,so)
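
The sblock comment above is just the outer product of the unit stick orientation with itself. A minimal NumPy check of that identity, with a made-up unit vector (not part of the original project):

import numpy as np

so = np.array([[0.6, 0.0, 0.8]])   # direction cosines (l, m, n), already unit length
sblock = np.dot(so.T, so)          # 3x3, follows the | ll lm ln | layout above
print(sblock)                      # [[0.36 0. 0.48], [0. 0. 0.], [0.48 0. 0.64]]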
Example #5
 def run(self, *args):
     result = list(args)
     for (validate, function) in self._stream:
         if not validate(*result): return result
         result = [function(*result)]
         for res in result: IO.debug(res)
     return result
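
A toy, self-contained driver for the validate/transform stream pattern above, with print standing in for IO.debug (the real IO module is not shown here):

class Stream:
    def __init__(self, stream):
        self._stream = stream

    def run(self, *args):
        result = list(args)
        for (validate, function) in self._stream:
            if not validate(*result): return result
            result = [function(*result)]
            for res in result: print(res)   # stand-in for IO.debug
        return result

print(Stream([(lambda x: x > 0, lambda x: x * 2),
              (lambda x: x < 100, lambda x: x + 1)]).run(5))   # prints 10, 11, then [11]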
Example #6
def ada_boost_dt():
    """
    Submission: ada_boost_dt_0707_03.csv
    E_val: 0.854350
    E_in: 0.889561
    E_out: 0.8832315976033993
    """
    from sklearn.ensemble import AdaBoostClassifier
    from sklearn.preprocessing import StandardScaler
    from sklearn.cross_validation import cross_val_score
    from sklearn.pipeline import Pipeline

    X, y = dataset.load_train()

    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)

    ab = AdaBoostClassifier(n_estimators=300)

    scores = cross_val_score(ab, X_scaled, y, cv=5, n_jobs=-1)
    logger.debug('CV: %s', scores)
    logger.debug('E_val: %f', sum(scores) / len(scores))

    ab.fit(X_scaled, y)

    logger.debug('E_in: %f', Util.auc_score(ab, X_scaled, y))

    IO.dump_submission(Pipeline([('scale_raw', raw_scaler),
                                 ('ab', ab)]), 'ada_boost_dt_0707_03')
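
For reference, a self-contained sketch of the same scale-then-boost pattern on synthetic data. It uses the modern sklearn.model_selection import (the snippet above predates it with sklearn.cross_validation); dataset, logger, Util and IO are project-specific and omitted:

from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(n_samples=500, random_state=0)
pipe = Pipeline([('scale_raw', StandardScaler()),
                 ('ab', AdaBoostClassifier(n_estimators=50))])
print(cross_val_score(pipe, X, y, cv=5).mean())   # mean validation accuracy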
Example #7
def bagging_lr():
    """
    Submission: bagging_lr_0707_02.csv
    E_val:
    E_in:
    E_out:
    """
    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import BaggingClassifier
    from sklearn.preprocessing import StandardScaler
    from sklearn.pipeline import Pipeline

    X, y = dataset.load_train()

    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)

    bag = BaggingClassifier(LogisticRegression(class_weight='auto'),
                            n_estimators=3000, oob_score=True, n_jobs=-1,
                            verbose=2)
    bag.fit(X_scaled, y)  # must fit before oob_score_ is available

    logger.debug('E_val (oob): %f', bag.oob_score_)
    logger.debug('E_in: %f', Util.auc_score(bag, X_scaled, y))

    IO.dump_submission(Pipeline([('scale_raw', raw_scaler),
                                 ('bag', bag)]), 'bagging_lr_0707_02')
Example #8
def rf2():
    """
    Submission: rf2_0704_04.csv
    3000 trees
    E_val: 0.871431
    E_in: 0.999998
    E_out:
    30000 trees
    E_val:
    E_in:
    E_out:
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.pipeline import Pipeline
    from sklearn.ensemble import RandomForestClassifier

    X, y = dataset.load_train()

    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)

    rf = RandomForestClassifier(n_estimators=30000, oob_score=True, n_jobs=-1,
                                class_weight='auto', max_features='log2')
    rf.fit(X_scaled, y)

    logger.debug('Eval(oob): %f', rf.oob_score_)
    logger.debug('Ein: %f', Util.auc_score(rf, X_scaled, y))

    IO.cache(rf, Path.of_cache('rf.RandomForestClassifier.log2.pkl'))
    IO.dump_submission(Pipeline([('scale_raw', raw_scaler),
                                 ('rf', rf)]), 'rf2_0704_04')
Example #9
    def solve(self):
        running = True
        while running:
            # Run techniques to remove any possible values from cells
            result1 = self.double()
            result2 = self.hidden_double()
            result3 = self.pointing_double()
            result4 = self.triple()
            result5 = self.hidden_triple()
            result6 = self.quad()
            result7 = self.hidden_quad()
            result8 = self.x_wing()
            result9 = self.swordfish()

            # Process finding singles last as previous steps will have removed more possible values
            result10 = self.single()
            result11 = self.hidden_single()

            # Hidden doubles/triples/quads will only return true if they have removed anything.
            # Single and Hidden single will also only return true if a single was found.
            running = result1 or result2 or result3 or result4 or result5 or result6 or result7 or result8 or result9 \
                or result10 or result11

        IO.print_board(self.board.raw_board)
        solved_board, result = BruteForce.smart_brute_force(self.board)
        IO.print_board(solved_board)
        return solved_board, result
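
The loop above is a propagate-until-fixpoint pattern: run every technique each pass and stop once no technique reports progress. A stripped-down sketch with a made-up technique (illustration only; note it deliberately runs all techniques rather than short-circuiting):

def solve_until_stable(techniques):
    progress = True
    while progress:
        results = [t() for t in techniques]   # run every technique each pass
        progress = any(results)

state = {'n': 5}
def shrink():
    if state['n'] > 0:
        state['n'] -= 1
        return True
    return False

solve_until_stable([shrink])
print(state)   # {'n': 0}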
Example #10
def lr_with_scale2():
    """
    Submission: lr_with_scale2_0704_03.csv
    E_val:
    E_in: 0.878996
    E_out: 0.8768131004917349
    """
    from sklearn.linear_model import LogisticRegressionCV
    from sklearn.preprocessing import StandardScaler
    from sklearn.pipeline import Pipeline

    X, y = dataset.load_train()

    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)

    clf = LogisticRegressionCV(Cs=50, cv=5, scoring='roc_auc', n_jobs=-1,
                               class_weight='auto')
    clf.fit(X_scaled, y)
    logger.debug('Best C: %f', clf.C_[0])
    logger.debug('Cs: %s', clf.Cs_)
    logger.debug('Grid scores: %s', clf.scores_)  # scores_ is a dict, so %s rather than %f
    logger.debug('Ein: %f', Util.auc_score(clf, X_scaled, y))

    IO.dump_submission(Pipeline([('scale_raw', raw_scaler),
                                 ('lr', clf)]), 'lr_with_scale2_0704_03')
Example #11
def lr_with_scale3():
    """
    Check the performance of normalizing TEST SET.

    Submission: lr_with_scale3_0707_04.csv
    E_val:
    E_in: 0.879233
    E_out: 0.8770121701777971

    Submission: lr_with_scale3_0712_01.csv
    E_val:
    E_in:
    E_out:
    """
    from sklearn.linear_model import LogisticRegression
    from sklearn.preprocessing import StandardScaler
    from sklearn.cross_validation import cross_val_score
    from sklearn.pipeline import Pipeline
    import numpy as np

    X, y = dataset.load_train()

    raw_scaler = StandardScaler()
    raw_scaler.fit(np.r_[X, dataset.load_test()])
    X_scaled = raw_scaler.transform(X)

    clf = LogisticRegression(C=0.03, class_weight='auto')
    clf.fit(X_scaled, y)

    logger.debug('E_in: %f', Util.auc_score(clf, X_scaled, y))
    IO.dump_submission(Pipeline([('scale_raw', raw_scaler),
                                 ('lr', clf)]), 'lr_with_scale3_0712_01')

    scores = cross_val_score(clf, X_scaled, y, scoring='roc_auc', n_jobs=-1)
    logger.debug('E_val: %f <- %s', np.average(scores), scores)
Example #12
File: map.py Project: hr203/c2raytools
def map_dbt_lightcone():
    for i in range(0, len(redshifts)):
        # print 'filename:', setup_dirs.resultsdir() + 'map_dbt_lightcone_' + str('%.3f' % redshifts[i]) + '.dat'

        filename = setup_dirs.resultsdir() + "map_dbt_lightcone_" + str("%.3f" % redshifts[i]) + ".dat"
        temp_filename = setup_dirs.path() + "Temper3D_" + str("%.3f" % redshifts[i]) + ".bin"
        xfrac_filename = setup_dirs.path() + "xfrac3d_" + str("%.3f" % redshifts[i]) + ".bin"
        if i % 2 == 0:
            density_filename = (
                "/research/prace/244Mpc_RT/244Mpc_f2_8.2pS_250/coarser_densities/"
                + str("%.3f" % redshifts[i])
                + "n_all.dat"
            )
        else:
            density_filename = (
                "/research/prace/244Mpc_RT/244Mpc_f2_8.2pS_250/coarser_densities/"
                + str("%.3f" % redshifts[i - 1])
                + "n_all.dat"
            )

        tfile = c2t.TemperFile(temp_filename)
        xfile = c2t.XfracFile(xfrac_filename).xi
        if ss == " ":
            dfile = c2t.DensityFile(density_filename).cgs_density
        else:
            dfile = np.ones(ss ** 3).reshape(ss, ss, ss) * 1.981e-10 * (1 + redshifts[i]) ** 3

        dT_box = c2t.calc_dt_full_lightcone(xfile, tfile, dfile, redshifts[i])
        IO.writemap(dT_box, filename)
        print "Written map to " + filename
Example #13
def getnewsitem2(url):
    
    f = urllib2.urlopen(url)
    rawhtml = f.read()
    #rawhtml = rawhtml.encode('iso-8859-9')
    f.close()
    encoding = f.headers['content-type'].split('charset=')[-1]

    
    markerTitle1 = '<title>'
    markerTitle2 = '</title>'
    title = extractitem(markerTitle1, markerTitle2, rawhtml)
    title = IO.encodingToutf8(title, encoding)
    title = title.split("/")[0]
    title = IO.replaceSpecialChars(title)
    
    markerText1 = '<div id="metin2" class="fck_li">'
    markerText2 = '<div class="IndexKeywordsHeader"'    # or 'id="hiddenTitle"'
    text = extractitem(markerText1, markerText2, rawhtml)
    text = nltk.clean_html(text)
    
    text = IO.encodingToutf8(text, encoding)
    text = IO.replaceSpecialChars(text)
    
    return NewsItem(title, "", text, "")
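
extractitem is not defined in this snippet; a minimal version consistent with how it is called above (hypothetical, returns the text between two markers):

def extractitem(marker1, marker2, rawhtml):
    start = rawhtml.find(marker1) + len(marker1)
    end = rawhtml.find(marker2, start)
    return rawhtml[start:end]

print(extractitem('<title>', '</title>', '<html><title>Hello</title></html>'))   # Hello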
Example #14
File: mean.py Project: hr203/c2raytools
def lightcone_stats(s='',ht=''):
    infile = setup_dirs.resultsdir()+'dbt_lightcone_smooth'+ht+'.bin'
    zfile = open(setup_dirs.resultsdir()+'dbt_lightcone_redshifts.bin','rb')
    meanfile = open(setup_dirs.resultsdir()+s+'_'+'mean'+'_'+ht+'lightcone.dat','w')
    skewnessfile = open(setup_dirs.resultsdir()+s+'_'+'skewness'+'_'+ht+'lightcone.dat','w')
    kurtosisfile = open(setup_dirs.resultsdir()+s+'_'+'kurtosis'+'_'+ht+'lightcone.dat','w')
    rmsfile = open(setup_dirs.resultsdir()+s+'_'+'rms'+'_'+ht+'lightcone.dat','w')
    redshiftfile = open(setup_dirs.resultsdir()+s+'_'+'zs'+'_'+ht+'.dat','w')
    lc = np.load(infile)
    zs = np.load(zfile)
    ratio=4. #smoothing ratio
    Lbox=244./0.7 #boxsize in cMpc
    SLbox=Lbox/ratio #size of smoothing box
    Nbox=250 #number of cells
    SNbox=int(Nbox/ratio)+1 #new number of cells
    print Lbox, SLbox, Nbox, SNbox
    for i in range(len(zs)-SNbox/2-2,SNbox/2,-1):
        mapfile=setup_dirs.resultsdir()+s+'_map_'+ht+str('%.3f'%zs[i])+'.bin'
        print "Doing redshift: " + str(zs[i])
        data,dims = c2t.get_lightcone_subvolume(lc,zs,zs[i],depth_mpc=SLbox,subtract_mean=False)
        IO.writebin(data,mapfile)
        redshiftfile.write(str(zs[i])+'\n')
        meanfile.write(str(np.mean(data))+'\n')
        rmsfile.write(str(np.sqrt(np.var(data)))+'\n')
        skewnessfile.write(str(c2t.statistics.skewness(data.flatten()))+'\n')
        kurtosisfile.write(str(c2t.statistics.kurtosis(data.flatten()))+'\n')
    print "Written statistics"
Example #15
class Parser(object):
    '''
    classdocs
    '''


    def __init__(self,filename):
        
        '''
        Constructor
        '''
        self.io = IO(filename)
    
    def parse_line(self, separator=" "):
        matrice = []
        self.io.init_reader()
        for line in self.io.file:
            if separator is not None:
                line = line.replace("\n", "")
                tab = [float(elem) for elem in line.split(separator)]
                matrice.append(tab)
        return matrice
        
Example #16
 def fileSave(self):
     if (self.fileName == ""):
         self.fileSaveAs()
     else:
         IO.saveSquadra(self.squadra, self.fileName)
         self.ui.actionSave.setEnabled(False)
         self.isModified = False
Example #17
File: map.py Project: hr203/c2raytools
def smoothed_lightcone_dbt(id="", ht=""):
    infile = open(setup_dirs.resultsdir() + "dbt_lightcone" + ht + ".bin", "rb")
    outfile = setup_dirs.resultsdir() + "dbt_lightcone_smooth" + ht + ".bin"
    zfile = open(setup_dirs.resultsdir() + "dbt_lightcone_redshifts.bin", "rb")
    dT_box = np.load(infile)
    log = open("stats.dat", "w")
    zs = np.load(zfile)

    dT_box3 = np.zeros((len(dT_box[:, 1, 1]) / 2, len(dT_box[1, :, 1]) / 2, len(dT_box[1, 1, :])))
    dT_box2 = np.zeros((len(dT_box[:, 1, 1]) / 2, len(dT_box[1, :, 1]) / 2, len(dT_box[1, 1, :])))
    for z in range(len(dT_box[1, 1, :]) - 1, -1, -1):
        wl = 0.21 * (1 + zs[z])
        c = 299792.458
        omm = 0.27
        oml = 0.73
        omr = 0.0
        omk = 1.0 - omm - oml - omr
        H0 = 70.0

        def integrand(x, omm, oml, omr, omk):
            return 1.0 / np.sqrt(omr * ((1 + x) ** 4) + omm * ((1 + x) ** 3) + oml + omk * ((1 + x) ** 2))

        def dc(z, omm, oml, omr, omk):
            return quad(integrand, 0, z, args=(omm, oml, omr, omk))[0]  # /(1.0+z)

        vec_dc = np.vectorize(dc)

        bw_r = wl / (2.0e3)  # radians
        bw = bw_r * 3437.74677  # arcminutes
        log.write("Wavelength of 21-cm from redshift " + str(zs[z]) + " is " + str(wl) + "m\n")
        log.write("At redshift: " + str(zs[z]) + " smoothing with a " + str(bw) + " arc minute beam.\n")

        rc = bw_r * c / H0 * vec_dc(zs[z], omm, oml, omr, omk)  # comoving Mpc
        Hz = H0 * np.sqrt(omr * (1 + zs[z]) ** 4 + omm * (1 + zs[z]) ** 3 + oml + omk * (1 + zs[z]) ** 2)

        log.write("rc = " + str(rc) + "\n")
        #        dnu=nu0*Hz*rc/(c*(1+zs[z])**2)
        dz = rc * Hz / c
        log.write("$\Delta$ z = " + str(dz) + "\n")
        ncs = rc * 250.0 * 0.7 / 244.0
        log.write(str(ncs) + " cells in the z direction\n")
        log.write("\n")
        #        rc_h=rc/0.7 # comoving Mpc/h
        #        print "This corresponds to "+str(rc_h)+"Mpc/h on the sky"

        dT_box2[:, :, z] = c2t.beam_convolve(dT_box[:, :, z], zs[z], 244.0, beam_w=bw)

        if z > ncs and z + ncs < zs[len(zs) - 1]:
            for x in range(len(dT_box2)):
                for y in range(len(dT_box2)):
                    dT_box3[x, y, z] = np.mean(dT_box2[x, y, int(z - ncs / 2.0) : int(z + ncs / 2.0)])  # slice bounds must be integers
        else:
            print "..."
            dT_box3[:, :, z] = dT_box2[:, :, z]

        IO.writebin(dT_box3[:, :, z], setup_dirs.resultsdir() + "smoothed_map_dbt_" + ht + str("%.3f" % zs[z]) + ".bin")

    IO.writebin(dT_box3, outfile)
    print "Written map to " + outfile
Example #18
def temporary_upload(reservatId, lines):
	IO.delete_DB_upload()
	values = []
	#id_reservatorio,cota,volume,volume_percentual,data_informacao,visualizacao,fonte
	for value in lines[1:]:
		aux = [reservatId] + value.split(',')
		values.append([int(reservatId),'',float(aux[1]),float(aux[2]),datetime.strptime(aux[4], '%d/%m/%Y').strftime('%Y-%m-%d'),1,aux[3]])
	IO.insert_many_BD_upload(values)
Example #19
File: rnnMain.py Project: DLYEE/DNN
def test():
    global inputBatches, keyOrder
    outputData = {}
    possibilityVectors = []
    rnnTrainFunc.testing(inputBatches, keyOrder, outputData, possibilityVectors)
    inputBatches = None
    IO.writeFile('../rnnFrame.csv', '../data/rnnTest.prb', possibilityVectors, outputData, keyOrder)
    IO.trimOutput('../rnnFrame.csv', '../rnn.csv')
Example #20
def ps():
    for i in range(len(redshifts)):
        data = IO.readmap("dbt_" + str("%.3f" % redshifts[i]))
        ps = power_spectrum_1d(data)
        ps = np.asarray(ps)
        print np.shape(ps), type(ps)
        IO.writedata(ps[0, :], "data/powerSpectra_dbt_" + str("%.3f" % redshifts[i]) + ".dat")
        IO.writedata(ps[1, :], "data/powerSpectraFrequencies_dbt_" + str("%.3f" % redshifts[i]) + ".dat")
Example #21
File: plot_1D.py Project: hr203/c2raytools
def powerSpec():
    ''' plotmap.plotdbt() '''
    for i in range(len(redshifts)):
        fr=IO.readoned("powerSpectraFrequencies_dbt_100b_"+str('%.3f' % redshifts[i]))
        data = IO.readoned("powerSpectra_100b_"+str('%.3f' % redshifts[i]))
        ps=data*fr**3./(4.*np.pi**2.)
        ps=np.sqrt(ps)
        plot_powerspectra(fr,ps,"ps_dbt_100b_notsquare"+str(i+10)+'_'+str(redshifts[i]),i)
Example #22
File: rnnMain.py Project: DLYEE/DNN
def readTrain():
    global inputBatches, labelBatches, keyOrder
    inputBatches = None
    labelBatches = None
    keyOrder = None
    inputData, keyOrder, length = IO.readFile('../data/train.prb')
    label = IO.readLabel('../data/label/train_fixed.lab', 48)
    inputBatches, labelBatches = rnnTrainFunc.makeBatch(inputData, keyOrder, label, 'train')
Example #23
def crawl_radikal(start, numOfPages, categoryID):
    
    name = "radikal"
    rootlink_item = "http://www.radikal.com.tr/Radikal.aspx?aType=RadikalDetayV3&ArticleID="
    #rootlink_id = "http://www.radikal.com.tr/Radikal.aspx?aType=RadikalKategoriTumuV3&CategoryID=81&PAGE="
    #rootlink_id = "http://www.radikal.com.tr/Radikal.aspx?aType=RadikalKategoriTumuV3&CategoryID="+str(categoryID)+"&PAGE="
    rootlink_id = "http://www.radikal.com.tr/"+str(cat_radikal[categoryID])+"/tum_haberler-"
    
    #item
    markerTitle1 = 'class="turkiye-tc">'   #'<title>'
    markerTitle2 = '</h1></div>'  #'</title>'
    
    
    '''  old:
    markerText1 = '<div id="metin2" class="fck_li">'
    markerText2 = '<div class="IndexKeywordsHeader"'    # or 'id="hiddenTitle"'
    
    idlimit1 = "<div class=\"cat-news\"><ol";
    idlimit2 = "var Geri = 'Geri'";
    
    '''
    
    ''' As of October 10 '''
    markerText1 = '<div id="metin2">'
    markerText2 = '<div class="social-area clearfix sc-news-bottom">'   #'<div class="article_end"'
    
    markerDate1 = '<span class="date">'  #'<div class="text_size"><span>'   #'<p class="date">'
    markerDate2 = '</span><div class="options">'   #'</span><span>'  #'</p>'
    
    # authors in radikal are inextractable. names are inside text (div id=metin2..)
    markerAuthor1 = '=MuhabirArama&amp;Keyword='
    markerAuthor2 = '</a>'
    
    idlimit1 = "<div class=\"box_z_a\"><div class=\"news mr20\">"  
    idlimit2 = "<div id=\"paging\""
    
    pattern1 = r"_[a-z0-9]+-[0-9]{6,10}"        #r";articleid=[0-9]{6,9}"    #r";ArticleID=[0-9]{6,9}"
    pattern2 = r'[0-9]{6,10}'
    
    
    resource1 = NewsResource(name, rootlink_id, rootlink_item, idlimit1, idlimit2, pattern1, pattern2, markerTitle1, markerTitle2, markerText1, markerText2, markerDate1, markerDate2, markerAuthor1, markerAuthor2)
    resource1.setEncoding('iso-8859-9')
    
    #start = 1
    #numOfPages = 2
    rooturl = resource1.rootlink_id
    IDlist = []
    for i in range(start,start+numOfPages):
        url = rooturl + str(i)
        IDlist = IDlist + retrieveNewsIDs(resource1, url)
    
    IDlist = list(set(IDlist))
    categoryName = cat_radikal[categoryID]
    path = resource1.newsidpath+categoryName+"_newsIDs"+str(start)+"-"+str(numOfPages)+".txt"
    IO.todisc_list(IDlist, path)
    
    crawlresourceItems(resource1, IDlist, categoryName)
Example #24
 def cacheOrComputeKernel(self, options, filename, f):
     if options.useCache == 1 and os.path.exists(filename):
         print >> sys.stderr, 'Loading kernel from file:', filename
         return IO.unpickle(filename)
     else:
         K = f(self)
         print >> sys.stderr, 'Saving kernel to file:', filename
         IO.pickle(filename, K)
         return K
Example #25
File: plot_1D.py Project: hr203/c2raytools
def allPowerSpec():
    fr=IO.readoned("powerSpectraFrequencies_dbt_100b_"+str('%.3f' % redshifts[0]))
    ps=np.zeros(len(redshifts)*len(fr)).reshape(len(redshifts),len(fr))
    for i in range(0,len(redshifts),10):
        ps[i,:] = IO.readoned("powerSpectra_100b_"+str('%.3f' % redshifts[i]))
        #print len(data), len(fr)
    ps=ps*fr**3./(4.*np.pi**2.)
    ps=np.sqrt(ps)
    plot_powerspectra(fr,ps,"ps_dbt_100b_notsquare_all",'null')
Example #26
File: map.py Project: hr203/c2raytools
def map_xfrac(id):
    #    for i in range(len(redshifts)-6,len(redshifts)-4):
    print len(redshifts)
    for i in range(start, len(redshifts)):
        filename = setup_dirs.resultsdir() + "map_xfrac" + id + "_" + str("%.3f" % redshifts[i]) + ".dat"
        xfrac_filename = setup_dirs.path() + "xfrac3d" + id + "_" + str("%.3f" % redshifts[i]) + ".bin"
        xfile = c2t.XfracFile(xfrac_filename)
        IO.writemap(xfile.xi, filename)
    print "Written map to " + filename
Example #27
def rf():
    """
    Submission: rf_0708_01.csv
    3000 trees
    E_val: 0.871837
    E_in: 0.999998
    E_out: 0.882316801296279
    15000 trees
    E_val: 0.872011
    E_in: 0.999998
    E_out: 0.8824869811781106
    30000 trees
    E_val: 0.871928
    E_in:
    E_out:

    depth=4; 12000 trees
    E_val: 0.969158
    E_in:
    E_out:
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.pipeline import Pipeline
    from sklearn.ensemble import RandomForestClassifier
    import numpy as np

    X, y = dataset.load_train(depth=1)

    raw_scaler = StandardScaler()
    raw_scaler.fit(np.r_[X, dataset.load_test()])
    X_scaled = raw_scaler.transform(X)
    del X
    import gc
    gc.collect()

    rf = RandomForestClassifier(n_estimators=12000, oob_score=True, n_jobs=-1,
                                class_weight='auto')
    rf.fit(X_scaled, y)

    logger.debug('RandomForestClassifier fitted')

    logger.debug('E_val(oob): %f', rf.oob_score_)
    logger.debug('E_in(full): %f', Util.auc_score(rf, X_scaled, y))

    X, y = dataset.load_train()
    X_scaled = raw_scaler.transform(X)
    logger.debug('E_in (depth=0): %f', Util.auc_score(rf, X_scaled, y))
    del X
    gc.collect()

    IO.dump_submission(Pipeline([('scale_raw', raw_scaler),
                                 ('rf', rf)]), 'rf_0708_01')

    logger.debug('caching fitted RandomForestClassifier')
    IO.cache(rf, Path.of_cache('rf.RandomForestClassifier.12000.pkl'))
    logger.debug('cached fitted RandomForestClassifier')
Example #28
def load_d10d11():
	import IO
	from pandas import concat
	fit10, par10, gen10, ids10 = IO.load_pickled_generation_dataframe('d10')
	fit11, par11, gen11, ids11 = IO.load_pickled_generation_dataframe('d11')
	par10['sc_nAgents'] = 150
	par11['ssmm_nAgents'] = 52
	par = concat([par10, par11])
	fit = concat([fit10, fit11])
	return fit, par
Example #29
File: EquSys.py Project: jksr/beta2
	def __init__(self, extballfn, extstickfn, forcevec, kassigner):
		self.ballfn = extballfn
		self.stickfn = extstickfn
		self.f = forcevec
		self.kassigner = kassigner
		self.balldat = IO.read_balls(extballfn)
		self.ballnum = len(self.balldat['COORDX'])
		self.stickdat = IO.read_sticks(extstickfn)
		self.sticknum = len(self.stickdat['BALL1'])
		self.k_mat = self.__get_k_matrix()
Example #30
def issue_101_plot_pars_vs_fitness(dataset, overshoot_threshold, preloaded_data = None):
	from plotting import get_pretty_xy_plot, make_pretty_scatter_plot
	from numpy import where


	def get_plots_to_make(fitness_types):
		plots_to_make = list()
		for fitness_type in fitness_types:
			for stat in stats:
				plots_to_make.append((fitness_type, stat))
		return plots_to_make


	def mkplot(all_data, groupby, plots_to_make):
		g = all_data.groupby(groupby)
		#x = g.groups.keys()
		s = all_data.sort(groupby)
		sorted_x, index_order = zip(*sorted(zip(g.groups.keys(), range(len(g.groups.keys())))))
		for attr, stat in plots_to_make:
			print groupby, attr, stat
			y = getattr(g[attr],stat)()
			filename = '%s%s__vs__%s(%s)'%(folder, groupby, attr, stat)
			ax, fig = get_pretty_xy_plot(sorted_x, y, groupby, '%s (%s)'%(attr, stat), filename, g[attr].std()/2, save_figure = False)
			filename = '%s%s__vs__%s(%s)_scatter'%(folder, groupby, attr, stat)
			make_pretty_scatter_plot(s[groupby], s[attr], groupby, '%s (%s)'%(attr, stat), filename, ax=ax, fig=fig)
	
	def run_analysis(groups, data, plots_to_make):
		for groupby in groups:
			mkplot(data, groupby, plots_to_make)

	
	folder = make_issue_specific_figure_folder('101_pars_vs_fits', dataset)
	stats = ['mean']

	if dataset == 'd10d11':
		f, p = utils.load_d10d11()
	else:
		f,p,g, i=IO.load_pickled_generation_dataframe(dataset_name=dataset)
		if dataset == 'd10':
			p['sc_nAgents'] = 150
		elif dataset == 'd11':
			p['ssmm_nAgents'] = 52

	

	if preloaded_data is None: 
		fit, par, gen, ids = IO.load_pickled_generation_dataframe(dataset)
	else:
		try:
			fit = preloaded_data['fit']
			par = preloaded_data['par']
		except KeyError, e:
			print "Provide dict with keys 'fit' and 'par' containing dataframes for fit and par data"
			print e
			sys.exit(1)
Example #31
	def getRecordedFrame():
		global frame_number
		frame_number = frame_number + 1
		prefix =  os.path.join(os.environ['GRIP_TEMP'],'saved_frame_')
		file_name = prefix + str(frame_number) + ".data"
		if ( not os.path.isfile(file_name) ):
			frame_number = 1
		file_name = prefix + str(frame_number) + ".data"
		header,data = IO.load(file_name)
		return data
Example #32
def Add(song, title=None, artist_info=None):
    song = IO.audio_to_signal(song)
    if title is None:
        title = song['audio_name']
    song = Song.Song(song['audio_signal'],
                     sampling_rate_info=song['sampling_rate'],
                     address_info=song['audio_source'],
                     title_info=title,
                     artist_info=artist_info)
    DbManager.library_add(song)
Example #33
    def play(self, purse):
        first_draw = True
        # XXX - insurance
        if self.blackjack():
            print("[BLACKJACK]")
            return 21
    
        while True:                    # actually, until we bust, surrender or stand
            # XXX - split
            # XXX - this is `late surrender', early surrender has to be
            # handled at insurance time, if it is to be offered
            if first_draw:
                action = IO.getresp(
                    "[H]it, [D]ouble down, [S]tand, or S[u]rrender (HDSU)? ",
                    "Please enter [H], [D], [S], or [U]: ",
                    ["h", "d", "s", "u"], "" )
            else:
                action = IO.getresp(
                    "[H]it or [S]tand (HS)? ",
                    "Please enter [H] or [S]: ",
                    ["h", "s"], "")

            if action == "h":
                print("You draw the", end=' ')
                if self.hit() == 0:
                    return 0
                # XXX some casinos allow DD after split.  some don't (confirm)
                first_draw = False
            elif action == "s":
                print("You stand")
                return self.value()
            elif action == "d":
                if purse.purse < table_min:
                    print("You cannot afford to double down!")
                    continue
                newbet = IO.getbet(table_min, min(purse.currbet, table_limit))
                purse.doubledown(newbet)
                print("You draw the", end=' ')
                return self.hit()
            elif action == "u":
                print("You surrender")
                purse.surrender()
                return 0
Example #34
def report_incomplete_profiles(list_profile_filenames,
                               output_file,
                               expected_length,
                               indices_mode=False,
                               identify_broken_ones_only=False):
    any_files_incomplete = False
    with open(output_file, 'w') as wf:
        counter = 0
        for fn in list_profile_filenames:
            with open(fn, 'rb') as rf:
                bitstring = rf.read()
                is_broken = False
                if indices_mode:
                    try:
                        decompressed_profiles_array = IO.decompress_profiles_indices(
                            bitstring)
                    except:
                        decompressed_profiles_array = []
                        is_broken = True
                else:
                    try:
                        decompressed_profiles_array = IO.decompress_profiles(
                            bitstring)
                    except:
                        decompressed_profiles_array = []
                        is_broken = True
                if not identify_broken_ones_only:
                    if len(decompressed_profiles_array) != expected_length:
                        any_files_incomplete = True
                        string_to_write = 'File %s contains %d profiles instead of %d\n' % \
                                        (fn, len(decompressed_profiles_array), expected_length)
                        wf.write(string_to_write)
                else:
                    if is_broken:
                        string_to_write = 'File %s is broken\n' % fn
                        wf.write(string_to_write)

            counter += 1
            # print("Processed file number ", counter)

        if any_files_incomplete:
            wf.write("Some incomplete files have been identified!\n")
        wf.write("All the files in the specified folder have been processed")
Example #35
def submit_job(job_atom):
    '''submit a job to cluster or your own computer'''

    if os.path.exists(INFILEPATH) is not True:
        os.system("mkdir " + INFILEPATH)
    if os.path.exists(OUTFILEPATH) is not True:
        os.system("mkdir " + OUTFILEPATH)

    homedir = os.getcwd()
    _, tail = os.path.split(homedir)
    jobname = tail + "." + job_atom.name

    infile = os.path.join(INFILEPATH,
                          "_in_{0}_{1}".format(job_atom.name, job_atom.pid))
    outfile = os.path.join(
        OUTFILEPATH, "out_{0}_{1}.txt".format(job_atom.name, job_atom.pid))
    jobfile = os.path.join(
        workdir, "_job_{0}_{1}.sh".format(job_atom.name, job_atom.pid))
    IO.SaveDict(infile, "w", job_atom.para)
    if job_atom.is_cluster:
        with open(jobfile, "w") as fjob:
            fjob.write("#!/bin/sh\n" + "#PBS -N " + jobname + "\n")
            if hasattr(job_atom, "pbs_command"):
                fjob.write(job_atom.pbs_command + "\n")
            fjob.write("#PBS -l walltime=200:00:00\n")
            fjob.write("#PBS -o " + homedir + "/Output\n")
            fjob.write("#PBS -e " + homedir + "/Error\n")
            fjob.write("echo $PBS_JOBID >>" + homedir + "/id_job.log\n")
            fjob.write("cd " + homedir + "\n")
            fjob.write(job_atom.execute + " -f " + infile)
        if job_atom.auto_run:
            os.system("qsub " + jobfile)
            os.system("rm " + jobfile)
            logging.info(job_atom.get_job_name() + " submitted!")
        else:
            print "You have to run " + job_atom.get_job_name(
            ) + " by yourself!"
    else:
        if job_atom.auto_run:
            #shellstr = "exec "+job_atom.execute+" -f "+infile+" >> "+outfile
            shellstr = "exec " + job_atom.execute + " -f " + infile
            proc = subprocess.Popen(shellstr, shell=True)
            if job_atom.keep_cpu_busy:
                PROCLIST.append((proc, job_atom))
            else:
                PROCLIST_BACK.append((proc, job_atom))

            logging.info(job_atom.get_job_name() + " is started...")
            logging.info("input:\n" + str(job_atom.para))
            logging.info("PID:{0}\n".format(proc.pid))
            print job_atom.get_job_name() + " is started..."
        else:
            print "You have to run " + job_atom.get_job_name(
            ) + " by yourself!"
    return
Example #36
    def merge_pdbs(self, writedir):
        replacements = {}
        replacements['LIG'] = 'LID'
        pattern = re.compile(r'\b(' + '|'.join(replacements.keys()) + r')\b')
        file_replaced = []
        atnr = self.atomoffset
        with open(self.lig2 + '.pdb') as infile:
            for line in infile:
                if line.split()[0].strip() in self.include:
                    line2 = pattern.sub(lambda x: replacements[x.group()], line)
                    file_replaced.append(line2)
                    
        with open(self.lig1 + '.pdb') as infile,                                            \
             open(writedir + '/' + self.lig1 + '_' + self.lig2 + '.pdb', 'w') as outfile:

            if self.system == 'protein':
                with open('protein.pdb') as protfile:
                    for line in protfile:
                        outfile.write(line)
                        
            for line in infile:
                if line.split()[0].strip() in self.include:
                    resnr = int(line[22:26])
                    # Temporary fix NaN error of overlapping heavy atom in Q, add offset
                    atnr += 1
                    atom1 = IO.pdb_parse_in(line)
                    atom1[1] = atom1[1] + self.atomoffset
                    atom1[6] = atom1[6] + self.residueoffset
                    atom1[8] = float(atom1[8]) + 0.001
                    atom1[9] = float(atom1[9]) + 0.001
                    atom1[10] = float(atom1[10]) + 0.001
                    line = IO.pdb_parse_out(atom1) + '\n'
              
                    outfile.write(line)
                    
            self.residueoffset = self.residueoffset + 2
            resnr = '{:4}'.format(self.residueoffset)
            for line in file_replaced:
                atnr = atnr + 1
                atchange = '{:5}'.format(atnr)
                line = line[0:6] + atchange + line[11:22] + resnr + line[26:]
                outfile.write(line)
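
The renumbering at the end relies on fixed-width PDB columns: the atom serial occupies columns 7-11 (line[6:11]) and the residue number columns 23-26 (line[22:26]). A toy splice on a sample ATOM line (illustration only, not the project's parser):

line = "ATOM      1  N   ALA A   1      11.104   6.134  -6.504  1.00  0.00           N"
atchange = '{:5}'.format(42)
resnr = '{:4}'.format(7)
print(line[0:6] + atchange + line[11:22] + resnr + line[26:])   # serial -> 42, residue -> 7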
Example #37
def main():

    Location = []
    Location, ratings, idList, priceList = search.searchWithOffset(20)
    kmeans = KMeans(n_clusters=5, random_state=0).fit(Location)
    maxLables = Clusters.attachLables(ratings, kmeans.labels_, 5)
    Clusters.showCluster(Location, 5, kmeans.cluster_centers_, kmeans.labels_,
                         maxLables, "kmeans_on_ratings.pdf")
    Location = np.array(Location)
    #print("Silhouette Score:",metrics.silhouette_score(Location, kmeans.labels_,metric='euclidean'))

    gmm = mixture.GMM(n_components=5, covariance_type='full')
    gmm.fit(Location)
    gaussian = gmm.predict(Location)
    maxLables = Clusters.attachLables(ratings, gaussian, 5)
    Clusters.showCluster2(Location, 5, gaussian, maxLables,
                          "gmm_on_ratings.pdf")
    Location = np.array(Location)
    #print("Silhouette Score:",metrics.silhouette_score(Location, gaussian,metric='euclidean'))

    kmeans = KMeans(n_clusters=4, random_state=0).fit(Location)
    maxLables = Clusters.attachLables(priceList, kmeans.labels_, 4)
    Clusters.showCluster(Location, 4, kmeans.cluster_centers_, kmeans.labels_,
                         maxLables, "kmeans_on_price.pdf")
    Location = np.array(Location)
    #print("Silhouette Score:",metrics.silhouette_score(Location, kmeans.labels_,metric='euclidean'))

    gmm = mixture.GMM(n_components=4, covariance_type='full')
    gmm.fit(Location)
    gaussian = gmm.predict(Location)
    maxLables = Clusters.attachLables(priceList, gaussian, 4)
    Clusters.showCluster2(Location, 4, gaussian, maxLables, "gmm_on_price.pdf")
    Location = np.array(Location)
    #print("Silhouette Score:",metrics.silhouette_score(Location, gaussian,metric='euclidean'))

    path = "location.csv"
    IO.csv_writer(Location, path, ',')

    long, la = IO.csv_reader(path)
    mapAPI.mapOut(GOOGLE_API_KEY, long, la)

    search.searchReviews(idList)
Example #38
def assemble_from_files(pdb_files, transformation_files, transformation_residue_pairs, movable_jumps, connections, output_path, sasa_threshold=600):
    '''Assemble two secondary structures given the PDB files, transformation
    file, transformation residues, movable jumps and connections.
    The outputs will be saved to the output_path.
    '''
    task_info = {}
    start_time = datetime.datetime.now()

    # Load the structures

    poses = []
    for pdb_file in pdb_files:
        poses.append( rosetta.core.pose.Pose() )
        rosetta.core.import_pose.pose_from_file(poses[-1], pdb_file)

    seqpos_map = assemble_helpers.make_seqpos_map(poses)

    # Do the pre-assembly

    Ts = [IO.load_rigid_body_transformation_from_file(tf) for tf in transformation_files]

    assemble_helpers.pre_assemble(poses, transformation_residue_pairs, Ts)
  
    merged_pose = poses[0]

    # Do the assembly

    assemble(merged_pose, movable_jumps, connections, seqpos_map, task_info, sasa_threshold)

    merged_pose.dump_pdb(os.path.join(output_path, 'assembled.pdb'))
    IO.sequence_to_fasta_file(os.path.join(output_path, 'assembled.fasta'), 'assembled', merged_pose.sequence())

    end_time = datetime.datetime.now()
    run_time = end_time - start_time

    # Save the task info

    task_info['sequence'] = merged_pose.sequence()
    task_info['score'] = merged_pose.energies().total_energy()
    task_info['run_time'] = run_time.total_seconds()

    IO.save_task_info_file(output_path, task_info)
Example #39
def exr2flow(exr, shape=(600, 800), h=600, w=800):
    """
    Returns a 2-channel image where the first channel is the disparity in the X direction and the second
    channel is the disparity in the Y direction.
    
    Convention:
    pixel moves right +ve flow in X-direction
    pixel moves down +ve flow in Y-direction

    :param exr: path to exr file
    :param shape: shape of the exr file
    :return: flow
    """
    file = OpenEXR.InputFile(exr)

    # Compute the size
    dw = file.header()['dataWindow']
    sz = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)

    FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
    (R, G, B) = [
        array.array('f', file.channel(Chan, FLOAT)).tolist()
        for Chan in ("R", "G", "B")
    ]

    img = np.zeros((h, w, 2), np.float64)
    if np.max(-np.array(R).reshape(img.shape[0], -1)) < 0.001 and np.min(
            -np.array(R).reshape(img.shape[0], -1)) > -0.001:
        img[:, :, 0] = np.round(-np.array(R).reshape(img.shape[0], -1))
    else:
        img[:, :, 0] = -np.array(R).reshape(img.shape[0], -1)
    if np.max(-np.array(G).reshape(img.shape[0], -1)) < 0.001 and np.min(
            -np.array(G).reshape(img.shape[0], -1)) > -0.001:
        img[:, :, 1] = np.round(np.array(G).reshape(img.shape[0], -1))
    else:
        img[:, :, 1] = np.array(G).reshape(img.shape[0], -1)

    # plt.imshow(img[:,:,1],cmap="gray")
    # plt.title("sadfasfd")
    # plt.show()
    io.write(exr[:-4] + ".flo", img)
    return img
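
A hypothetical call to the helper above (the .exr path is a placeholder; the OpenEXR, Imath, and numpy packages, plus the io writer, must be importable):

flow = exr2flow('frames/flow_0001.exr', h=600, w=800)
print(flow.shape)   # (600, 800, 2): X disparity in channel 0, Y in channel 1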
Example #40
def genProsody(input_hanzi_file, input_pinyin_file, save_prosody_file):
    """gen prosody"""
    hanzi_lines = IO.readList(input_hanzi_file)
    pinyin_lines = IO.readList(input_pinyin_file)
    assert len(hanzi_lines) == len(pinyin_lines)
    results = []
    for i in range(len(hanzi_lines)):
        hz_line_array = hanzi_lines[i].split('|')
        py_line_array = pinyin_lines[i].split('|')
        print('Processing ' + hz_line_array[0])
        cur_words = createWords(hz_line_array[1])
        cur_phons = createPhons(py_line_array[1])
        assert len(cur_phons) == len(cur_words)
        words_prosody = setWordsProdosy(cur_words, cur_phons)

        # output
        results.append(hz_line_array[0])
        results.append(' '.join(words_prosody))
        results.append('[' + '['.join(cur_phons))
    IO.write(save_prosody_file, results)
Example #41
 def center_of_mass(self, inpcrd_file_name, prmtop_file_name):
     """
     return the center of mass of self._crd
     """
     prmtop = dict()
     prmtop = IO.PrmtopLoad(
         prmtop_file_name).get_parm_for_grid_calculation()
     com = np.zeros([3], dtype=float)
     masses = prmtop["MASS"]
     crd_temp = IO.InpcrdLoad(inpcrd_file_name).get_coordinates()
     natoms = prmtop["POINTERS"]["NATOM"]
     if (crd_temp.shape[0] != natoms) or (crd_temp.shape[1] != 3):
         raise RuntimeError("coordinates in %s has wrong shape" %
                            inpcrd_file_name)
     for atom_ind in range(len(crd_temp)):
         com += masses[atom_ind] * crd_temp[atom_ind]
     total_mass = masses.sum()
     if total_mass == 0:
         raise RuntimeError("zero total mass")
     return com / total_mass
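
The mass-weighted loop above is the standard center-of-mass reduction and can be vectorized; a toy NumPy check with made-up masses and coordinates (not tied to the prmtop/inpcrd loaders):

import numpy as np

masses = np.array([1.0, 3.0])
crd = np.array([[0.0, 0.0, 0.0],
                [4.0, 0.0, 0.0]])
com = (masses[:, None] * crd).sum(axis=0) / masses.sum()
print(com)   # [3. 0. 0.]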
Example #42
def find_equilibrium_concentration(params,
                                   geom,
                                   execline,
                                   targets,
                                   path='./',
                                   funcparams=[0]):
    '''
  For a given set of parameters, this function simulates sputtering over a range of concentrations,
  and identifies that concentration at which the sputter rate is exactly equal to the supply rate;
  i.e., the steady concentration.  Identifying this concentration is necessary for estimation of
  parameters that appear in two-component systems.
  '''

    if (len(funcparams) == 3):
        alpha = funcparams[0]
        beta = funcparams[1]
        gamma = funcparams[2]
    for tt in targets:
        params.target = tt
        if (params.funcname and len(funcparams) == 3):
            gm = gamma_max(tt[1][1], alpha, beta)
            if gm <= 1.0:
                gamma *= gm
            params.cfunc = lambda depth: phistar_list(depth, tt[1][1], alpha,
                                                      beta, gamma)
        ensure_data(params, geom, execline, path)

    m0e_avgs = io.array_range(path, params, "target", targets, "m0e_avg")

    n = 0  #find the intersection point, where the index [n] refers to the point after intersection, and at phi[n], a is less than b
    if (m0e_avgs[n][0] > m0e_avgs[n][1]):
        a = 0
        b = 1
    else:
        a = 1
        b = 0
    while (m0e_avgs[n][a] > m0e_avgs[n][b]):
        n += 1
    #currently the above logic is only necessary to set up the while loop
    #targets has form [ [["Ga", phi],["Sb", 1-phi]], etc ]
    #for a certain range of phi. We can access these values directly through
    #targets[n] --> [["Ga", phi], ["Sb", 1-phi]]
    #targets[n][0][1]

    phi0 = targets[n - 1][0][1]
    phi1 = targets[n][0][1]

    equilibrium_concentration = (phi1 *
                                 (m0e_avgs[n - 1][a] - m0e_avgs[n - 1][b]) -
                                 phi0 * (m0e_avgs[n][a] - m0e_avgs[n][b])) / (
                                     m0e_avgs[n - 1][a] + m0e_avgs[n][b] -
                                     m0e_avgs[n - 1][b] - m0e_avgs[n][a])

    return equilibrium_concentration
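
The closing expression linearly interpolates where the difference between the two sputter-rate curves crosses zero between phi0 and phi1; with d = a - b it reduces to (phi1*d0 - phi0*d1)/(d0 - d1). A toy check of the same formula with made-up numbers:

phi0, phi1 = 0.4, 0.5
d0, d1 = 0.2, -0.1                            # a - b at phi0 and at phi1
phi_eq = (phi1 * d0 - phi0 * d1) / (d0 - d1)
print(phi_eq)                                 # ~0.4667, the zero crossing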
Example #43
    def listen(self) -> Union[str, None]:

        # listen
        with self.sr.Microphone() as source:
            self.listener.adjust_for_ambient_noise(source)
            IO.stdout("Listening...")
            audio = self.listener.listen(source)
            IO.stdout("Recognising...")

        cmd = None
        try:
            cmd = self.listener.recognize_google(audio, language="en-GB")
        except self.sr.UnknownValueError as e:
            logging.warning("Could not understand audio, try again")
            return self.listen()
        except self.sr.RequestError as e:
            # NOTE: default API key only allows 50 requests per day
            logging.error("Could not request results - no internet connection or invalid API key, temporarily reverting to standard input", exc_info=e)

        return cmd
Example #44
    def __init__(self, window: QMainWindow):

        self.window = window
        print(os.getcwd())
        window.setWindowIcon(QIcon(IO.resource_path("img/icon.ico")))

        self.set_important_ui_elements()
        self.set_icons()
        self.connect_ui()
        #load the saved configurations and add them to the menubar
        self.create_menu()
Example #45
 def save():
     display.stop()
     save_io = IO.RuntimeIO()
     save_io.celas = display.getCelas()
     save_io.graphic = io.graphic
     io.global_settings["Initial time"] = display.getTime()
     save_io.global_settings = io.global_settings
     save_io.N = display.getCelaNum()
     filename = g.QFileDialog.getSaveFileName()
     if filename:
         save_io.save(filename)
Example #46
    def split_PDB(self):
        if self.AA == False:
            return None

        with open(self.lig + '.pdb') as infile, \
             open('tmp.pdb', 'w') as outfile:
            for line in infile:
                if IO.pdb_parse_in(line)[2] not in self.backbone:
                    outfile.write(line)

        self.pdbfiles.append('tmp.pdb')
Example #47
def main():
    import_modules()
    args = handler()

    seqs_dict, seqs_order = IO.read_rna_bin_file(args.rna_bin_filename)
    window_length = calculate_window_length(args, do_print=True)
    bytestring = fold_all_sequences_wrapper(seqs_dict, seqs_order,
                                            window_length, args)

    with open(args.folded_bin_filename, 'wb') as wf:
        wf.write(bytestring)
Example #48
    def saveFile(self):

        save_file = open(self.filename, 'w')
        html = '%s' % self.html
        html.encode('utf-8')
        HtmlToNoteFile = HtmlToNotefile.HtmlToNoteFileFormat(
            IO.encode_utf(html))
        print(HtmlToNoteFile.listToMarkup())

        save_file.write('%s' % html)
        save_file.close()
Example #49
 def get_extra_aliases(self):
     extra_aliases_temp = IO.load_txt(self.extra_aliases_file)
     label_synset_list = [line.split('\t') for line in extra_aliases_temp]
     extra_aliases = {}
     for label, synset_str in label_synset_list:
         synset_list = synset_str.split(',')
         for synset in synset_list:
             if synset in extra_aliases:
                 raise ValueError('duplicate synset: %s' % synset)  # a bare raise would fail outside an except block
             extra_aliases[synset] = label
     return extra_aliases
Example #50
    def read_route(self, filename):
        # load the solution from a file
        try:
            route, profit, salesman_time = IO.read_solution(filename)
            print(route)
        except Exception as e:
            self.error_message(
                f'Wystąpił nieoczekiwany błąd podczas wczytywania trasy: {e}')
            return

        self.solution = route
Example #51
def m2mo(m, folder):
	"""
		convert code mass to solar masses (the kg unit_mass is divided by MO)
	"""
	c = Constantes()

	param = IO.ParamsInfo(folder = folder).get()

	unit_m = float(param["unit_mass"])
	unit_m /= c.MO
	return m*unit_m 
Example #52
def erf():
    """
    Submission: erf_0705_01.csv
    3000 trees
    E_val: 0.870800
    E_in: 0.999998
    E_out:
    15000 trees
    E_val:
    E_in:
    E_out:
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.pipeline import Pipeline
    from sklearn.ensemble import ExtraTreesClassifier

    X, y = dataset.load_train()

    raw_scaler = StandardScaler()
    raw_scaler.fit(X)
    X_scaled = raw_scaler.transform(X)
    del X

    rf = ExtraTreesClassifier(n_estimators=3000, oob_score=True, n_jobs=-1,
                              class_weight='auto', bootstrap=True)
    rf.fit(X_scaled, y)

    logger.debug('ExtraTreesClassifier fitted')

    import gc
    gc.collect()

    logger.debug('Eval(oob): %f', rf.oob_score_)
    logger.debug('Ein: %f', Util.auc_score(rf, X_scaled, y))

    IO.dump_submission(Pipeline([('scale_raw', raw_scaler),
                                 ('erf', rf)]), 'erf_0705_01')

    logger.debug('caching fitted ExtraTreesClassifier')
    IO.cache(rf, Path.of_cache('erf.ExtraTreesClassifier.auto.pkl'))
    logger.debug('cached fitted ExtraTreesClassifier')
Example #53
def main(datapath='processed_data', regionpath='',regionfile='data', orbitpath='', orbitfile='orbits'):

    global path
    path = datapath

    # load region data
    region = None
    try:
        region = IO.read_file(filepath=regionpath, filename=regionfile)
    except:
        region = classes.Region()

    #orbit file, as list
    orbits = IO.read_file(filepath=orbitpath, filename=orbitfile)

    n = classes.Iterator()
    n.set_min( 0 )
    n.set_max( len(orbits) )

    # index of current frame
    index, orbit = get_next_orbit(list(region.get()), orbits)
    n.set(index)

    n.set_mapping(orbits)

    #frame, t = get_orbit_legacy(data, n.get_mapping())
    frame, t = get_orbit(orbit=n.get_mapping())
    ima, els, mag = check_validity(frame,t)


    # plot initial data
    global ylimits
    ylimits = {'IMA': [0, 96], 'ELS' : [0, 128], 'MAG': [-40, 40]}

    fig, axis, i = plot_orbit(n, region, xsize=9, ysize=6)

    fig.suptitle('Orbit {} ({})'.format(n.get_mapping(), i.get_mapping()))

    define_GUI(i,n,region,fig,axis)
    
    return (region, orbits)
Example #54
def draw(x, y, w, h, display):
    weather_stored = IO.read("data/weather/weather")
    precip_stored =  IO.read("data/weather/precip")
    temp_stored = IO.read("data/weather/temp")
    temperature = u'°'
    __draw_text(display, 250, (255, 255, 255), x, y, temp_stored + temperature)

    if weather_stored.__contains__("Fair") or weather_stored.__contains__("Sun") or weather_stored.__contains__(
            "Clear"):
        weather_image=sunny
    elif weather_stored.__contains__("Cloud"):
        weather_image=cloudy
    elif weather_stored.__contains__("Rain"):
        weather_image=rainy
    else:
        weather_image=unknown
    display.blit(weather_image, (x-150, y))
    display.blit(raindrop, (x-50, y+130))
    curr_precip = precip_stored + "%"
    __draw_text(display, 40, (255, 255, 255), x-50-sizeString(curr_precip, 40), y + 135, curr_precip)
    if IO.exists("data/weather/day0/wind"):
        curr_wind = IO.read("data/weather/day0/wind").split(" ", 1)[1]
        __draw_text(display, 25, (255, 255, 255), x-50-sizeString(curr_wind, 25), y + 180, curr_wind)
    display.blit(wind, (x-50, y+180))
    """40x40"""

    yPos = 250
    space = 30
    for i in range(1, 7):
        curr_day = "data/weather/day" + repr(i)+"/"
        if not IO.exists(curr_day):
            break
        col_val = 255-6*(i-1)*(i-1)
        col=(col_val,col_val,col_val)
        __draw_text(display, 30, col, x + 140, y + yPos + space * i, IO.read(curr_day + "dayotw"))
        if IO.read(curr_day+ "precip")== "0":
            sun_small.set_alpha(col_val)
            display.blit(sun_small, (x+210, y+yPos+space*i))
            sun_small.set_alpha(255)
        else:
            rain_small.set_alpha(col_val)
            display.blit(rain_small, (x+210, y+yPos+space*i))
            rain_small.set_alpha(255)
        __draw_text(display, 30, col, x + 250, y + yPos + space * i, IO.read(curr_day + "lo") + " - " + IO.read(curr_day + "hi"))

    """
Example #55
def write_qcalc(dcd):
    with open(s.INPUT_DIR+ '/qcalc.inp') as infile,          \
         open('qcalc_tmp.inp', 'w') as outfile:
        for line in infile:
            line = IO.replace(line, replacements)
            if line.rstrip() == 'TRAJECTORIES':
                for trajectory in dcd:
                    trajectory = trajectory + '\n'
                    outfile.write(trajectory)
                continue

            outfile.write(line)
Example #56
def main():
    open_path = r'/home/shaopf/study/BiaoBeiData/BZNSYP/Prosody.txt'
    save_path = r'/home/shaopf/study/BiaoBeiData/BZNSYP/fulllab2'
    mono_path = r'/home/shaopf/study/BiaoBeiData/BZNSYP/monolab'
    open_lines = IO.read(open_path)
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    vowel_path = r'/home/shaopf/study/BiaoBeiData/BZNSYP/Vowel.lst'
    init_vowels(vowel_path)

    for line_index in range(0, len(open_lines), 3):
        print(open_lines[line_index].strip())
        save_fulllab_path = save_path + "/" + open_lines[line_index].strip(
        ) + ".lab"
        mono_lab_path = mono_path + "/" + open_lines[line_index].strip(
        ) + ".lab"
        phon_line = open_lines[line_index + 2].strip()
        read(phon_line)
        monos = IO.readList(mono_lab_path)
        write(save_fulllab_path, monos)
Example #57
    def add_infinite(self, tm, reason):
        """Note an infinite TM. Add statistics and output it with reason."""
        self.num_infinite += 1
        self.inf_type[reason] += 1
        self.tm_num += 1

        if self.pout:
            io_record = IO.Record()
            io_record.ttable = tm.get_TTable()
            io_record.category = Exit_Condition.INFINITE
            io_record.category_reason = (reason, )
            self.io.write_record(io_record)
Example #58
    def __init__(self, device_info, controller):
        #Ioservice
        Arm.io = IO.IoService(controller)

        #set joint constraints
        duaro_plan.LowerArmPath.init_path_constraints(1, 55.0, -20.0, 1)
        duaro_plan.LowerArmPath.init_path_constraints(2, 140.0, 15.0, 0.5)
        #duaro_plan.LowerArmPath.init_path_constraints(3, 0.1499, 0.0, 0.1)
        duaro_plan.LowerArmPath.init_path_constraints(4, -35.0, -100.0, 0.5)

        #init_cylinder
        self.ctrl_cylinder("UP")
Example #59
 def source_button_action(self):
     self.source = pathlib.Path(filedialog.askdirectory())
     self.entry_source.config(state='normal')
     self.entry_source.delete(0, tkinter.END)
     self.entry_source.insert(0, self.source)
     self.entry_source.config(state='readonly')
     self.source_directory = IO.Directory(self.source)
     self.files = self.source_directory.recursvieFileTypeSearch('csv')
     self.label_file_count['text']='{} files.'.format(len(self.files))
     for i in self.files:
         self.file_list.insert(tkinter.END, str(i)+'\n')
     self.master.focus()
Example #60
def test_from_file(filename, line, cutoff, block_size, offset):
  ttable = IO.load_TTable_filename(filename, line)
  if VERBOSE:
    for term in ttable:
      print term
    print
  if test_CTL(ttable, cutoff, block_size, offset):
    if VERBOSE:
      print "Success :)"
  else:
    if VERBOSE:
      print "Failure :("