Example #1
def saveGenomes(j):
    output = open('actordata.pkl' + str(j), 'wb')
    data1 = actorPopulation.getActorPickles()
    data = [data1]
    # pprint.pprint(data1)
    pickle.dump(data, output, -1)
    output.close()
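For symmetry, a minimal loader for the file written above is sketched here; loadGenomes is a hypothetical name, and it assumes the same 'actordata.pkl<j>' naming scheme. Note that saveGenomes wraps the payload in a one-element list, so the loader unwraps it.

import pickle

def loadGenomes(j):
    # hypothetical counterpart to saveGenomes: read the one-element list back and unwrap it
    with open('actordata.pkl' + str(j), 'rb') as f:
        data = pickle.load(f)
    return data[0]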
Example #2
	def makeHist(self, normalize = True, doPMF = True):
		if self.isDataPickled:
			return

		if not self.Dim == 1:
			raise TypeError('Variable # mismatch')

		z = self.z
		Nframes = len(z)
		bin_min = 0.98 * z.min(); bin_max = 1.02*z.max()
		delta = (bin_max - bin_min)/float(self.nbins)
		bin_centers = np.zeros(self.nbins)
		bin_vals = np.zeros(self.nbins)
		pmf = np.zeros(self.nbins)
		for i in range(self.nbins):
			bin_centers[i] = bin_min + (i+0.5) * delta
			
		frameStatus = pb(Text = 'Binning frame by frame', Steps = Nframes)
		for i in range(Nframes):
		
			assignment = int((z[i] - bin_min)/delta)
			bin_vals[assignment] += 1.0
		
			frameStatus.Update(i)
		
		if normalize:
			#bin_vals /= (np.sum(bin_vals) * delta)
			bin_vals /= np.trapz(bin_vals, bin_centers, dx = delta)
		if doPMF:
			pmf = - np.log(bin_vals)
		

		hist = {'bin_centers': bin_centers, 'bin_vals': bin_vals, 'pmf' : pmf}
		with open(self.data, 'wb') as of:
			pickle.dump(hist, of)
		self.isDataPickled = True
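A small sketch of reading the pickled histogram back, assuming self.data points at the file written above; the stored object is a dict with 'bin_centers', 'bin_vals' and 'pmf' entries. load_hist is a hypothetical helper.

import pickle

def load_hist(data_path):
    # hypothetical reader for the dict written by makeHist
    with open(data_path, 'rb') as f:
        hist = pickle.load(f)
    return hist['bin_centers'], hist['bin_vals'], hist['pmf']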
Example #3
def main():
    attrs = ('high', 'low', 'avg', 'vol', 'vol_cur', 'last',
             'buy', 'sell', 'updated', 'server_time')
    
    #initialize connection
    connection = btceapi.BTCEConnection()
    
    f = open('/media/Big Daddy/New_Documents/python_data/ltc_btc_depth.pkl', 'ab')     
    while 1:
       
        #sleep for 1 second between samples
        time.sleep(1)
    
        try:
            #get ticker
            ticker = btceapi.getTicker("ltc_btc", connection)
            #get asks/bids
            asks, bids = btceapi.getDepth("ltc_btc")
            ask_prices, ask_volumes = zip(*asks)
            bid_prices, bid_volumes = zip(*bids)
        
            #start list with all of the ticker info
            curTrades = trades(coin='ltc', updated=ticker.updated, server_time=ticker.server_time,
                               ask_prices=ask_prices, ask_volumes=ask_volumes,
                               bid_prices=bid_prices, bid_volumes=bid_volumes,
                               buy=ticker.buy, sell=ticker.sell)
            #print out_list
            #now we have a huge list with all the info, write to a single line in the csv file
            
            # Pickle class using protocol 0.
            pickle.dump(curTrades,f)    
        
            #if connection is lost, just try to reconnect (this does seem to happen, so this line is actually pretty important for long data collects)
        except Exception:
            connection = btceapi.BTCEConnection()
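The collector above appends one pickle per sample to a file opened in 'ab' mode, so reading it back means calling pickle.load repeatedly until the stream runs out. A minimal sketch under that assumption (read_trades is a hypothetical helper):

import pickle

def read_trades(path):
    # yield every trades object appended by successive pickle.dump calls
    with open(path, 'rb') as f:
        while True:
            try:
                yield pickle.load(f)
            except EOFError:
                break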
Example #4
    def handleExportEntityTree(self):
        try:
            selectedEntId = self.selectedEntity.entId
        except AttributeError:
            self.editor.showWarning('Please select a valid entity first.', 'error')
            return

        import tkFileDialog
        filename = tkFileDialog.asksaveasfilename(parent=self.editor.parent, defaultextension='.egroup', filetypes=[('Entity Group', '.egroup'), ('All Files', '*')])
        if len(filename) == 0:
            return
        eTree = {}
        eGroup = {}

        def addEntity(entId, treeEntry):
            treeEntry[entId] = {}
            eGroup[entId] = self.levelSpec.getEntitySpecCopy(entId)
            entity = self.getEntity(entId)
            for child in entity.getChildren():
                addEntity(child.entId, treeEntry[entId])

        addEntity(selectedEntId, eTree)
        for entId, spec in eGroup.items():
            eGroup[entId] = self.specPrePickle(spec)

        try:
            import pickle
            with open(filename, 'w') as f:
                pickle.dump(eTree, f)
                pickle.dump(eGroup, f)
        except Exception:
            self.editor.showWarning("Error exporting entity group to '%s'." % filename, 'error')
            return
Example #5
def testFolder(inputfolder, outputfolder, decisionThreshold = cfg.decision_threshold, applyNMS=True):

    fileList = os.listdir(inputfolder)
    imagesList = filter(lambda element: '.jpg' in element, fileList)

    print 'Start processing '+inputfolder

    start = time()
    for filename in imagesList:

        imagepath = inputfolder + '/' + filename
        print 'Processing '+imagepath

        #Test the current image
        bboxes, scores = testImage(imagepath, decisionThreshold=decisionThreshold, applyNMS=applyNMS)

        #Store the result in a dictionary
        result = dict()
        result['imagepath'] = imagepath
        result['bboxes'] = bboxes
        result['scores'] = scores

        #Save the features to a file using pickle
        outputFile = open(outputfolder+'/'+filename+'_'+'-'.join(cfg.featuresToExtract)+'_'+cfg.model+'.results', "wb")
        pickle.dump(result, outputFile)
        outputFile.close()
    elapsed_time = time() - start
    print('Time elapsed using regular function:  ', elapsed_time)
Example #6
def main():
    idir, ofile, dffile = _parse_cmdline()

    print u'Loading doc-freqs file {}...'.format(dffile)
    with open(dffile, 'rb') as f:
        df = pickle.load(f)    

    print u'Reading input directory: {}'.format(idir)
    jobs = _load_jobs(idir, df)

    # Do the work.
    pool = Pool(4)
    njobs = len(jobs)

    per = 0.0
    try:
        import sys
        with codecs.open(ofile, 'wb') as pf:
            pickle.dump(njobs, pf)
            results = pool.imap_unordered(worker, jobs)
            for i, result in enumerate(results, 1):
                pickle.dump(result, pf)
                per = 100 * (float(i) / njobs)
                sys.stdout.write(u'\rPercent Complete: {:2.3f}%'.format(per))
                sys.stdout.flush()
            sys.stdout.write(u'\rPercent Complete: 100%    \n')
            sys.stdout.flush()

    except KeyboardInterrupt:
        sys.stdout.write(u'\rPercent Complete: {:2.3f}%    \n'.format(per))
        sys.stdout.write(u'Shutting down.\n')
        sys.stdout.flush()
        sys.exit()

    print u'Complete!'
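The output file written above starts with the job count, followed by one pickled result per job, so a reader can load the count first and then loop that many times. A sketch under that assumption (read_results is a hypothetical helper):

import pickle

def read_results(path):
    # first record is njobs, followed by njobs pickled results
    with open(path, 'rb') as pf:
        njobs = pickle.load(pf)
        return [pickle.load(pf) for _ in range(njobs)]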
Example #7
def download_deps(from_directory='abc'):
	localHash=0
	remoteEtag=0
	hash=-1	
	etag=-1
	cacheInfo = os.path.realpath(ZK_DESTPATH + '/' + ZK_CACHE_FILENAME)
	if (os.path.exists(ZK_FULLPATH)):
		localHash=hash_file(ZK_FULLPATH)
		remoteEtag=get_etag(ZK_DOWNLOAD_URL)
		if (os.path.exists(cacheInfo)):
			with open(cacheInfo, 'rb') as f:
				etag, hash = pickle.load(f)
	if ((etag==remoteEtag) and (hash==localHash)):
		print os.path.realpath(ZK_DESTPATH) + " is ready"
	else:
		print "Downloading file from " + ZK_DOWNLOAD_URL + " to " + ZK_DESTPATH
		etag = download_file(ZK_DOWNLOAD_URL, ZK_DESTPATH)
		hash = hash_file(ZK_FULLPATH)
		
		print "Saving download information to : " + os.path.realpath(ZK_DESTPATH + '/' + ZK_CACHE_FILENAME)	
		with open(cacheInfo, 'wb') as f:
			pickle.dump([etag, hash], f, -1)
			
		print "Extracting " + ZK_FULLNAME + " has been extracted"
		extract_file(ZK_FULLNAME, SCRIPT_PATH + "/" + ZK_DEPS_NAME + "/")
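hash_file and get_etag are defined elsewhere in this script. A plausible hash_file, assuming an MD5 digest over the file contents, might look like the sketch below; the real helper may differ.

import hashlib

def hash_file(path, chunk_size=65536):
    # assumed implementation: MD5 of the file contents, read in chunks
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()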
Example #8
    def del_by_name(self, name):
        '''
        Delete a cloud instance entry from the backup file by name.

        Parameter:
            name -- cluster name

        Logic:
            Load the content of the backup file (a list of clusters),
            remove the cluster whose name matches, then dump the new
            content back into the backup file.

        No return value
        '''

        src_file = open(os.path.expanduser(self.backup_file), "r")
        cloud_list = pickle.load(src_file)
        for cloud in cloud_list:
            if cloud['name'] == name:
                # remove the cluster whose name matches
                cloud_list.remove(cloud)
                src_file = open(os.path.expanduser(self.backup_file), "w")
                pickle.dump(cloud_list, src_file)
                src_file.close()
                return
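A sketch of the same load-filter-dump idea using context managers, so the read handle is closed before the file is rewritten; del_by_name_safe is a hypothetical standalone variant and drops every entry matching the name rather than only the first.

import os
import pickle

def del_by_name_safe(backup_file, name):
    # load the cluster list, drop matching entries, write the list back
    path = os.path.expanduser(backup_file)
    with open(path, 'rb') as src:
        cloud_list = pickle.load(src)
    cloud_list = [cloud for cloud in cloud_list if cloud['name'] != name]
    with open(path, 'wb') as dst:
        pickle.dump(cloud_list, dst)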
Example #9
 def __init__(self, config, cache_folder_path):
     super(BierDopje, self).__init__(None)
     #http://api.bierdopje.com/23459DC262C0A742/GetShowByName/30+Rock
     #http://api.bierdopje.com/23459DC262C0A742/GetAllSubsFor/94/5/1/en (30 rock, season 5, episode 1)
     self.api = None
     try:
         key = config.get("BierDopje", "key") # You need to ask for it
         if key != "":
             self.api = "http://api.bierdopje.com/%s/" %key
     except ConfigParser.NoSectionError:
         config.add_section("BierDopje")
         config.set("BierDopje", "key", "")
         config_file = os.path.join(cache_folder_path, "config")
         configfile = open(config_file, "w")
         config.write(configfile)
         configfile.close()
         return
     self.headers = {'User-Agent' : 'periscope/%s' % version.VERSION}
     self.cache_path = os.path.join(cache_folder_path, "bierdopje.cache")
     if not os.path.exists(self.cache_path):
         log.info("Creating cache file %s" % self.cache_path)
         f = open(self.cache_path, 'w')
         pickle.dump({'showids' : {}}, f)
         f.close()
     f = open(self.cache_path, 'r')
     self.cache = pickle.load(f)
     f.close()
Example #10
def Get_All_Teams():
    data_path = '../data/'
    # get the teams
    url = 'http://espn.go.com/nba/teams'
    html = urllib.urlopen(url).read()
    soup = BeautifulSoup(html, 'lxml')
    # print (soup.prettify())
    tables = soup.find_all('ul', class_ = 'medium-logos')

    tables[0].find_all('li')[0].h5.a

    name_pref_Tuples = []
    city_name_Dict = {}
    for table in tables:
        lis = table.find_all('li')
        for li in lis:
            info = li.h5.a
            team_url = info['href']
            team_name = info.text
            pref = team_url.split('/')[-2]
            city_name = ' '.join(info.text.split()[:-1])
            if team_name == 'Portland Trail Blazers':
                city_name = 'Portland'
            city_name_Dict[city_name] = team_name
            name_pref_Tuples.append((team_name, pref))

    print 'output two files: city_name.pickle and name_pref.pickle'
    print 'city_name.pickle is a dict with (city, team_name) pairs'
    print 'name_pref.pickle is a list of (team_name, team_name_prefix) tuples'
    pk.dump(city_name_Dict, open(data_path + 'city_name.pickle', 'wb'))
    pk.dump(name_pref_Tuples, open(data_path + 'name_pref.pickle', 'wb'))
Example #11
 def single_run():
     # Helper for the case where someone runs MPI on a single directory
     output = analyze.analyze_directory(args['--store'], **analyzer_kwargs)
     if do_serialize:
         with open(args['--serial'], 'wb') as f:
             pickle.dump(output, f)
         print("Results have been serialized to {}".format(args['--serial']))
Example #12
def run():
    start_time = time.time()
    inputfolder = cfg.testFolderPath
    outputfolder = cfg.resultsFolder
    fileList = os.listdir(inputfolder)
    imagesList = filter(lambda element: '.jpg' in element, fileList)

    gt = []
    predictions = []

    print 'Start processing '+inputfolder

    results = Parallel(n_jobs=4)(delayed(testFolder)(filename, inputfolder) for filename in imagesList)

    for i in range(len(results)):
        gt.append(results[i][0])
        predictions.append(results[i][1])

    #Store the result in a dictionary
    result = dict()
    result['gt'] = gt
    result['predictions'] = predictions

    #Save the features to a file using pickle
    if not os.path.exists(cfg.resultsFolder):
        os.makedirs(cfg.resultsFolder)
    outputFile = open(outputfolder+'/'+'-'.join(cfg.featuresToExtract)+'_'+cfg.model+'.results', "wb")
    pickle.dump(result, outputFile)
    outputFile.close()

    print("FINISH PROCESS  --- %s seconds ---" % (time.time() - start_time))
Example #13
def save_mesh(signal):
    if not nodolist.run:
        datos = nodolist, link_color24, link_color50
        with open(dir_trabajo + '/data.ms', 'wb') as f:
            pickle.dump(datos, f)
    else:
        message_stop()
Example #14
def test_pickle_model():
    tmpdir = tempfile.mkdtemp()
    pickle_file = os.path.join(tmpdir, 'stanmodel.pkl')
    model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
    m = pystan.StanModel(model_code=model_code, model_name="normal1",
                         save_dso=False)
    module_name = m.module.__name__
    with open(pickle_file, 'wb') as f:
        pickle.dump(m, f)
    del m
    del sys.modules[module_name]

    with open(pickle_file, 'rb') as f:
        m = pickle.load(f)
    assert m.model_name == "normal1"

    m = pystan.StanModel(model_code=model_code, model_name="normal2")
    module_name = m.module.__name__
    module_filename = m.module.__file__
    with open(pickle_file, 'wb') as f:
        pickle.dump(m, f)
    del m
    del sys.modules[module_name]

    with open(pickle_file, 'rb') as f:
        m = pickle.load(f)
    assert m.model_name == "normal2"
    assert m.module is not None
    assert module_filename != m.module.__file__
    fit = m.sampling()
    y = fit.extract()['y']
    assert len(y) == 4000
Example #15
def interpro_result(interpro_submit_sequences, email, developing, script_dir):
    protein_ipr_db_domain = {}
    # this is done per 25
    for protein_name, interpro_result in iprscan_soappy.runInterpro(interpro_submit_sequences, email):  # yields (protein name, InterPro result dict) pairs
        ipr_domain_names = []
        protein_ipr_db_domain[protein_name] = {}

        for ipr_code in interpro_result:
            # list of ipr domain names for this protein
            if 'ipr_names' in interpro_result[ipr_code]:
                ipr_domain_names += interpro_result[ipr_code]['ipr_names']
            for database in interpro_result[ipr_code]:
                protein_ipr_db_domain[protein_name][ipr_code] = {database:interpro_result[ipr_code][database]}	 # update it with database and database specific name
                # make a separate list for PFAM domains, because these are used later
        protein_ipr_db_domain[protein_name]['ipr_domain_names'] = ipr_domain_names
        if developing:
            try:
                interpro_file = script_dir+os.sep+'interpro_results'+os.sep+fix_file_names(protein_name.split('|')[0].strip()+'_interpro.p')
                f = open( interpro_file, 'wb' )
                pickle.dump( protein_ipr_db_domain[protein_name], f )
                print 'wrote interpro data to '+interpro_file
            except Exception:
                traceback.print_exc()
    return protein_ipr_db_domain
Example #16
    def _upload_all_nonatomic(self, items, suffix=""):
        """Upload a new set of items.

        This takes a list of vobject items and
        uploads them nonatomic and without existence checks.

        """
        cache_folder = os.path.join(self._filesystem_path,
                                    ".Radicale.cache", "item")
        self._makedirs_synced(cache_folder)
        hrefs = set()
        for item in items:
            uid = item.uid
            try:
                cache_content = self._item_cache_content(item)
            except Exception as e:
                raise ValueError(
                    "Failed to store item %r in temporary collection %r: %s" %
                    (uid, self.path, e)) from e
            href_candidates = []
            if os.name in ("nt", "posix"):
                href_candidates.append(
                    lambda: uid if uid.lower().endswith(suffix.lower())
                    else uid + suffix)
            href_candidates.extend((
                lambda: radicale_item.get_etag(uid).strip('"') + suffix,
                lambda: radicale_item.find_available_uid(hrefs.__contains__,
                                                         suffix)))
            href = None

            def replace_fn(source, target):
                nonlocal href
                while href_candidates:
                    href = href_candidates.pop(0)()
                    if href in hrefs:
                        continue
                    if not pathutils.is_safe_filesystem_path_component(href):
                        if not href_candidates:
                            raise pathutils.UnsafePathError(href)
                        continue
                    try:
                        return os.replace(source, pathutils.path_to_filesystem(
                            self._filesystem_path, href))
                    except OSError as e:
                        if href_candidates and (
                                os.name == "posix" and e.errno == 22 or
                                os.name == "nt" and e.errno == 123):
                            continue
                        raise

            with self._atomic_write(os.path.join(self._filesystem_path, "ign"),
                                    newline="", sync_directory=False,
                                    replace_fn=replace_fn) as f:
                f.write(item.serialize())
            hrefs.add(href)
            with self._atomic_write(os.path.join(cache_folder, href), "wb",
                                    sync_directory=False) as f:
                pickle.dump(cache_content, f)
        self._sync_directory(cache_folder)
        self._sync_directory(self._filesystem_path)
Example #17
 def dump_file(self, content):
     counter = 0
     while True:
         while True:
             try:
                 fl = open(self.PATH+'.tmp', 'wb')
                 pickle.dump(content, fl)
                 fl.close()
                 fl = open(self.PATH+'.tmp','rb')
                 h2 = pickle.load(fl)
                 fl.close()
                 assert h2 == content
                 break
             except:
                 #print '\nThere was an error dumping the history!\n'\
                 #'This happened %d times so far, trying again...'%(counter)
                 counter+=1
         try:
             if os.path.exists(self.PATH):
                 os.remove(self.PATH)
             os.rename(self.PATH+'.tmp',self.PATH)
             fl = open(self.PATH,'rb')
             h2 = pickle.load(fl)
             fl.close()
             assert h2 == content
             break
         except:
             pass
Example #18
 def add_var(self, var_name, eqn_text, do_save=True):
     """Add to the variables bubble list and dictionary.
     
     Arguments:
     - `self`:
     - `var_name`:
     - `eqn_text`:
     """
     # This function was actually created and modeled after
     # self.add_to_history.  Relevant comments can be found there.
     var_exists = var_name in self.var_dict
     self.var_dict[var_name] = eqn_text
     if do_save:
         with open(VARDB_FILE, "wb") as var_file:
             pickle.dump(self.var_dict, var_file)
     if not var_exists:
         new_btn = BubbleButton(text=var_name)
         last_pos = len(self.var_dict)
         new_btn.bind(on_press=lambda *args: self.set_eqn(self.var_dict[var_name], len(self.history_stack) + 1))
         try:
             kivy.require("1.4.2")
             self.var_list_bubble.content.add_widget(new_btn, last_pos + 1)
         except Exception:
             self.var_list_bubble.content.clear_widgets()
             self.var_list_bubble.content.add_widget(new_btn)
             for dice_roll in reversed(self.var_list_stack):
                 dice_bubble = BubbleButton(text=dice_roll)
                 dice_bubble.bind(on_press=self.var_dict[dice_roll])
                 self.var_list_bubble.content.add_widget(dice_bubble)
             self.var_list_stack.append(var_name)
         if not hasattr(self, "bubble_height_var"):
             self.bubble_height_var = self.dice_eqn_input.height
         else:
             self.var_list_bubble.height += self.bubble_height_var
             self.var_list_bubble.parent.height += self.bubble_height_var
Example #19
    def _parse_file( self ):

        cdd = dict()
        with open( files[2] ) as cddTree:
            with open( files[4] ) as cddAttr:
                cdd  = gh.createCDDtree( cddTree, cddAttr, False )

        #pp.pprint(cdd['ByPssm'])        
        pickle.dump( cdd, open( files[6], 'wb' ))
        fout = open( files[5], 'wt' )
        fout.write( '\t'.join([ 'Pssm', 'Acc', 'Name', 'Desc', 'Root', 'Sub', 'Super' ]) + "\n" )
        for d in list( cdd['ByPssm'].values()) :
            sub = ''
            if type( d['Sub']) is list:
#                print( 'yes, sub is a list' )
                sub = ';'.join( [ d['Sub'][i]['Pssm'] for i in range(len(d['Sub'])) ] )

#            print(  type(d['Sub'])  )
            sup = '' #d['Root']['Pssm']
            if isinstance( d['Super'], dict ):
#                print('yes, super is a dict')
                sup = d['Super']['Pssm']

            fout.write( '\t'.join([ d['Pssm'], d['Acc'], d['Name'], d['Desc'],
                                    d['Root']['Pssm'], sub, sup ]) + "\n")
#            print( type(d['Super']) )
#            print( d['Desc'])
        fout.close()
Example #20
def pickleFunc(fname, senthash, type):
	if type == "save":
		with open(fname, mode='wb') as f:
			pickle.dump(senthash, f)
	elif type == "load":
		with open(fname, mode='rb') as f:
			return pickle.load(f)
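A short usage example for the wrapper above; the file name and contents are arbitrary.

senthash = {'sentence one': 0.9, 'sentence two': 0.1}
pickleFunc('senthash.pkl', senthash, 'save')
restored = pickleFunc('senthash.pkl', None, 'load')
assert restored == senthash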
Example #21
def getCountry(text, one, two, con):
    posone = 0
    country = {}
    while(posone >= 0):
        posone = text.find(one, posone)
        postwo = text.find(two, posone)
        if posone < 0:
            break
        ss = text[posone:postwo]
        start = ss.find('"')
        end = ss.find('"', start+1)
        link = url + ss[start+8:end] + 'explore/'
        print link
        name = ss[end+2:]
        city = getCity(link, name)
        print name
        #country[name] = link
        country[name] = (link, city)
        tmp = open(name, 'w')
        pickle.dump(country, tmp)
        tmp.close()
        posone = postwo
    tmp = open(con, "w")
    pickle.dump(country, tmp)
    tmp.close()
Example #22
def sort_warheroes_list():
    data_list = pickle.load(open("war_base", "rb"))
    war_base_url = "http://www.warheroes.ru/hero/hero.asp?Hero_id="
    session = requests.session()
    for unit in data_list:
        num = unit[0]
        title = unit[1]
        print(unit)
        id = str(num)
        war_url = war_base_url + id
        is_get_res = False
        try_count = 0
        war_data = ""
        while not is_get_res and try_count < 10:
            try:
                res = session.get(war_url)
                war_data = res.text
                is_get_res = True
            except:
                try_count += 1
                time.sleep(1)
        if is_otrs_exists(war_data):
            try:
                print("  - otrs exists")
                good_list = pickle.load(open("free_war_base", "rb"))
                good_list.append(unit)
                pickle.dump(good_list, open("free_war_base", "wb"))
            except:
                tools.debug_print("Bad pickle save3. "+id)
        else:
            try:
                bad_list = pickle.load(open("nonfree_war_base", "rb"))
                bad_list.append(unit)
                pickle.dump(bad_list, open("nonfree_war_base", "wb"))
            except:
                tools.debug_print("Bad pickle save3. "+id)
Example #23
 def checkFiles(self):
     check_files=[]
     i=0
     for movfolder in self.movDirectory:
         print movfolder
         for root, dirs, files in os.walk(movfolder):
             for file in files:
                 i=i+1
                 self.updateProgressString(i)
                 if(self.isMovFile(file)):
                     fileURL = os.path.join(root, file)
                     if (not os.path.islink(fileURL)):
                         check_files.append(fileURL)
     historyMovFile = self.getHistoryMovFileName()
     if(check_files==self.MOV_FILES):
         checkResult = "correct"
     else:
         checkResult = "incorrect"
         outfile = open(historyMovFile,"wb")
         pickle.dump(check_files, outfile,2)
         outfile.close()
     if os.name == "nt":
         print checkResult
     else:
         tkMessageBox.showinfo( "Movie DB Verify", checkResult+' , Movie DB Verify Finish.')
Example #24
 def sortFilesByFileSize(self):
     originalFiles=[]
     historyMovFile = self.getHistoryMovFileName()
     if(os.path.exists(historyMovFile)):
         infile = open(historyMovFile,"rb")
         originalFiles = pickle.load(infile)
         infile.close()
     sortFiles = []
     finalFiles = []
     for file in originalFiles:
         if(self.isMovFile(file)):
             sortFiles.append(os.path.getsize(file))
     ## print sortFiles
     
     originalSize = len(originalFiles)
     for i in range(originalSize):
         maxValue=max(sortFiles)
         k=sortFiles.index(maxValue)
         finalFiles.append(originalFiles[k])
         sortFiles.remove(maxValue)
         originalFiles.remove(originalFiles[k])
     ## print finalFiles
     
     outfile = open(historyMovFile,"wb")
     pickle.dump(finalFiles, outfile,2)
     outfile.close()
     self.showAllImage()
Example #25
    def sortFilesByName(self):
        originalFiles=[]
        historyMovFile = self.getHistoryMovFileName()
        if(os.path.exists(historyMovFile)):
            infile = open(historyMovFile,"rb")
            originalFiles = pickle.load(infile)
            infile.close()
        sortFiles = []
        finalFiles = []
        for file in originalFiles:
            if(self.isMovFile(file)):
                sortFiles.append(os.path.split(file)[-1])
        sortFiles=list(set(sortFiles))
        sortFiles.sort()
        ## print sortFiles

        for file in sortFiles:
            for oriFile in originalFiles:
                if oriFile.find(file) != -1:
                    finalFiles.append(oriFile)
        ## print finalFiles
        
        outfile = open(historyMovFile,"wb")
        pickle.dump(finalFiles, outfile,2)
        outfile.close()
        self.showAllImage()
Example #26
    def saveFavoriteFile(self,filename):
        originalFiles = []
        newFavorite = True
        favoriteFileName = self.getFavoriteFileName()
        if(os.path.exists(favoriteFileName)):
            infile = open(favoriteFileName,"rb")
            originalFiles = pickle.load(infile)
            infile.close()       
        favoriteLen = len(originalFiles)
        if favoriteLen == 0:
            originalFiles.append([1,filename])
        else:
            for i in range(favoriteLen):
                if filename in originalFiles[i]:
                    newFavorite=False
                    originalFiles[i][0] = originalFiles[i][0] + 1
            if newFavorite:
                originalFiles.append([1,filename])

        originalFiles.sort()
        originalFiles.reverse()
        ## print originalFiles
        
        outfile = open(favoriteFileName,"wb")
        pickle.dump(originalFiles, outfile,2)
        outfile.close()
Example #27
    def sortFilesByPlayback(self):
        originalFiles=[]
        historyMovFile = self.getHistoryMovFileName()
        if(os.path.exists(historyMovFile)):
            infile = open(historyMovFile,"rb")
            originalFiles = pickle.load(infile)
            infile.close()

        finalFiles = []
        favoriteFiles = []
        favoriteFileName = self.getFavoriteFileName()
        if(os.path.exists(favoriteFileName)):
            infile = open(favoriteFileName,"rb")
            favoriteFiles = pickle.load(infile)
            infile.close()       
        favoriteLen = len(favoriteFiles)
        originalLen = len(originalFiles)
        if favoriteLen == 0 or originalLen == 0:
            return
        else:
            for i in range(favoriteLen):
                try:
                    finalFiles.append(favoriteFiles[i][1])
                    originalFiles.remove(favoriteFiles[i][1])
                except:
                    pass
                    

        finalFiles = finalFiles + originalFiles
        ## print finalFiles
     
        outfile = open(historyMovFile,"wb")
        pickle.dump(finalFiles, outfile,2)
        outfile.close()
        self.showAllImage()
Example #28
def login():
    # correct first: {"login":{"result":"NeedToken","token":"xxx","cookieprefix":"ruwiki","sessionid":"xxx"}}
    # correct 2nd: {"login":{"result":"Success","lguserid":1151236,"lgusername":"******","lgtoken":"xxx","cookieprefix":"ruwiki","sessionid":"xxx"}}
    # bad 2nd: {"login":{"result":"WrongPass"}}
    # bad 2nd: {"login":{"result":"NotExists"}}
    username = "******"
    userpass = "******"

    try:
        session = requests.Session()
        login_url = "https://ru.wikipedia.org/w/api.php?action=login&format=json"
        post_data1 = {'lgname': username, 'lgpassword': userpass}
        res1 = session.post(login_url, post_data1)
        result1 = res1.json()["login"]["result"]
        if result1 != "NeedToken":
            tools.debug_print("ACHTUNG! Bad login try. Wrong 1st result.")
            return False
        token1 = res1.json()["login"]["token"]
        cookieprefix1 = res1.json()["login"]["cookieprefix"]
        sessionid1 =  res1.json()["login"]["sessionid"]
    except:
        tools.debug_print("EXCEPTION! Bad login try. 1st part.")
        return False

    try:
        post_data2 = post_data1
        post_data2["lgtoken"] = token1
        res2 = session.post(login_url, post_data2)
        result2 = res2.json()["login"]["result"]
        if result2 != "Success":
            tools.debug_print("ACHTUNG! Bad login try. Wrong 2nd result.")
            return False
        userid2 = res2.json()["login"]["lguserid"]
        username2 = res2.json()["login"]["lgusername"]
        if username2 != username:
            tools.debug_print("ACHTUNG! Bad login try. Get wrong username.")
            return False
        token2 = res2.json()["login"]["lgtoken"]
        cookieprefix2 = res2.json()["login"]["cookieprefix"]
        if cookieprefix2 != cookieprefix1:
            tools.debug_print("ACHTUNG! Bad login try. Wrong cookie prefix.")
            return False
        sessionid2 =  res2.json()["login"]["sessionid"]
        if sessionid2 != sessionid1:
            tools.debug_print("ACHTUNG! Bad login try. Wrong session id.")
            return False
    except:
        tools.debug_print("EXCEPTION! Bad login try. 2nd part.")
        return False

    try:
        f = open("session_keeper", "wb")
        pickle.dump(session, f)
        f.close()
    except:
        tools.debug_print("EXCEPTION! Can't save session.")
        return False

    tools.debug_print("Success login. User: " + username)
    return True
Example #29
 def orderrun_detail(self, kitchen, pdict, return_all_data=False):
     """
     api.add_resource(OrderDetailsV2, '/v2/order/details/<string:kitchenname>', methods=['POST'])
     :param self: DKCloudAPI
     :param kitchen: string
     :param pdict: dict
     :param return_all_data: boolean
     :rtype: DKReturnCode
     """
     rc = DKReturnCode()
     if kitchen is None or isinstance(kitchen, basestring) is False:
         rc.set(rc.DK_FAIL, 'issue with kitchen')
         return rc
     url = '%s/v2/order/details/%s' % (self.get_url_for_direct_rest_call(),
                                       kitchen)
     try:
         response = requests.post(url, data=json.dumps(pdict), headers=self._get_common_headers())
         rdict = self._get_json(response)
         if False:
             import pickle
             pickle.dump(rdict, open("files/orderrun_detail.p", "wb"))
         pass
     except (RequestException, ValueError), c:
         s = "orderrun_detail: exception: %s" % str(c)
         rc.set(rc.DK_FAIL, s)
         return rc
Example #30
def write_session(session):
	try:
		pickle.dump(session, open(session_file_name, 'w'))
		open(session_checksum_name, 'w').write(md5(open(session_file_name).read()).hexdigest())
	except:
		print "Saving session failed!\n Check if you have permission to access \"{0}\" and \"{1}\"".format(session_file_name, session_checksum_name)
		exit(1)
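A matching reader that checks the stored MD5 before unpickling could look like the sketch below; it assumes session_file_name and session_checksum_name are the same module-level paths used by write_session, that md5 comes from hashlib, and it mirrors the Python 2 text-mode writes above. read_session is a hypothetical name.

import pickle
from hashlib import md5

def read_session():
	# hypothetical counterpart to write_session: verify the checksum, then unpickle
	raw = open(session_file_name).read()
	stored = open(session_checksum_name).read()
	if md5(raw).hexdigest() != stored:
		raise ValueError("Session checksum mismatch, refusing to load")
	return pickle.loads(raw)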
Example #31
    solution = np.empty((repeat, sig_scale.shape[0]), object)
    for i in np.arange(repeat):
        for j, sigma_inv in enumerate(sig_scale):
            # sig_inv = np.ones(2)*sigma_inv
            sig_inv = np.ones(2) * sigma_inv
            sig_inv[0] = extreme
            solver = EGO(sig_inv, obj, bounds, max_iter, num_ini_guess)
            solution_X, solution_y = solver.solve()
            solution[i, j] = (solution_X, solution_y)

    # save the solution
    with open(file_address, 'w') as f:
        pickle.dump(
            {
                'solution': solution.tolist(),
                'extreme': extreme,
                'obj_name': obj_name,
                'max_iter': max_iter
            }, f)
    f.close()
else:
    with open(file_address, 'r') as f:
        data = pickle.load(f)
    f.close()
    solution = np.array(data['solution'])
    # sig_scale = np.array(data['sig_scale'])
    max_iter = data['max_iter']

    # solution_X = np.array(solution['X'])
    # solution_y = np.array(solution['y'])
Example #32
 def pickle(self, fileName):
     """Save the pickled multiarray in the given file"""
     outputStream = open(fileName, 'wb')
     pickle.dump(self, outputStream)
     outputStream.close()
Example #33
def save_experiment_data(dictionary, log_dir):
    with open(log_dir + '/experiment.pkl', 'wb') as handle:
        pickle.dump(dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)
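A matching loader is sketched below; load_experiment_data is a hypothetical name. The protocol argument only matters when writing, so reading back needs nothing special, but a pickle written with HIGHEST_PROTOCOL may not be readable by older Python versions.

import pickle

def load_experiment_data(log_dir):
    # hypothetical counterpart to save_experiment_data
    with open(log_dir + '/experiment.pkl', 'rb') as handle:
        return pickle.load(handle)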
Example #34
def prepare_data(gt_2d_bdb=False, patch_h=224, patch_w=224, shift=True, iou_threshold=0.1):
    """
        Generating the ground truth for end-to-end training

        Parameters
        ----------
        gt_2d_bdb : bool
            indicates whether to use the ground truth of 2D bounding boxes
        patch_h: int
            the height of the target resized patch
        patch_w: int
            the width of the target resized patch
        shift : bool
            indicates whether to randomly shift the 2D bounding boxes
        iou_threshold : float
            iou threshold for matching two 2D bounding boxes
    """
    bin = PATH.bins()
    data_root = op.join(PATH.metadata_root, 'sunrgbd_train_test_data')
    train_path = list()
    test_path = list()
    layout_centroid = list()
    layout_coeffs = list()
    # obj_category = dict()
    if not op.exists(data_root):
        os.mkdir(data_root)
    for i in range(10335):
        sequence = readsunrgbdframe(image_id=i+1)
        print i+1
        sequence._R_tilt = loadmat(op.join(PATH.metadata_root, 'updated_rtilt', str(i+1) + '.mat'))['r_tilt']
        # R_ex is cam to world
        sequence._R_ex = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]).dot(sequence.R_tilt).dot(np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]]))
        K = sequence.K
        result = []
        for bdb2d in sequence.bdb2d:
            if check_bdb(bdb2d, 2*sequence.K[0, 2], 2*sequence.K[1, 2]):
                result.append(bdb2d)
            else:
                print 'ground truth not valid'
        sequence._bdb2d = result

        bdb2d_from_3d_list = []
        with open(op.join(PATH.metadata_root, '2dbdb', str(i + 1) + '.json'), 'r') as f:
            detected_bdbs = json.load(f)
        f.close()
        boxes = list()
        for bdb3d in sequence.bdb3d:
            center = bdb3d['centroid'][0]
            coeffs = bdb3d['coeffs'][0]
            basis = bdb3d['basis'].astype('float32')
            if bdb3d['classname'][0] not in OBJ_CATEGORY_CLEAN:
                continue
            bdb2d_from_3d = project_struct_bdb_to_2d(basis, coeffs, center, sequence.R_ex.T, K)
            projected_2d_center = project_3d_points_to_2d(center.reshape(1, 3), sequence.R_ex.T, K)
            if bdb2d_from_3d is None:
                print '%s not valid' % (bdb3d['classname'][0])
                continue
            bdb2d_from_3d['classname'] = bdb3d['classname'][0]
            bdb2d_from_3d_list.append(bdb2d_from_3d)
            if gt_2d_bdb is True:
                max_iou = 0
                iou_ind = -1
                for j, bdb2d in enumerate(sequence.bdb2d):
                    if bdb2d['classname'] == bdb3d['classname'][0]:
                        iou = get_iou(bdb2d_from_3d, bdb2d)
                        if iou > iou_threshold and iou > max_iou:
                            iou_ind = j
                            max_iou = iou
                if iou_ind >= 0:
                    if shift:
                        shifted_box = random_shift_2d_box(sequence.bdb2d[iou_ind])
                        boxes.append({'2dbdb': shifted_box, '3dbdb': bdb3d,
                                      'projected_2d_center': projected_2d_center})
                    else:
                        boxes.append({'2dbdb': sequence.bdb2d[iou_ind], '3dbdb': bdb3d, 'projected_2d_center': projected_2d_center})
            else:
                max_iou = 0
                iou_ind = -1
                max_bdb = dict()
                for j, bdb2d in enumerate(detected_bdbs):
                    if bdb2d['class'] == bdb3d['classname'][0]:
                        box = bdb2d['bbox']
                        box = {'x1': box[0], 'y1': box[1], 'x2': box[2], 'y2': box[3]}
                        iou = get_iou(bdb2d_from_3d, box)
                        if iou > iou_threshold and iou > max_iou:
                            iou_ind = j
                            max_iou = iou
                            box['score'] = bdb2d['score']
                            box['classname'] = bdb2d['class']
                            max_bdb = box
                if iou_ind >= 0:
                    # print max_iou, bdb2d_from_3d, detected_bdbs[iou_ind]
                    if shift:
                        shifted_box = random_shift_2d_box(max_bdb)
                        boxes.append({'2dbdb': shifted_box, '3dbdb': bdb3d, 'projected_2d_center': projected_2d_center})
                    else:
                        boxes.append({'2dbdb': max_bdb, '3dbdb': bdb3d, 'projected_2d_center': projected_2d_center})
        # print boxes
        camera = dict()
        camera_flip = dict()
        camera['yaw_cls'], camera['yaw_reg'], camera['roll_cls'], camera['roll_reg'] = camera_cls_reg(sequence.R_ex.T, bin)
        camera['K'] = sequence.K
        # flip the camera
        camera_flip['yaw_cls'], camera_flip['yaw_reg'], camera_flip['roll_cls'], camera_flip['roll_reg'] = camera_cls_reg(sequence.R_ex.T, bin, flip=True)
        camera_flip['K'] = sequence.K
        template_path = op.join(PATH.metadata_root, 'size_avg_category.pickle')
        layout_pts = loadmat(op.join(PATH.metadata_root, '3dlayout', str(i+1) + '.mat'))['manhattan_layout'].T
        l_centroid, l_basis, l_coeffs = get_bdb_from_corners(layout_pts)
        # print l_centroid
        layout_centroid.append(l_centroid)
        layout_coeffs.append(l_coeffs)
        layout = dict()
        layout['centroid_reg'] = layout_centroid_avg_residual(l_centroid, bin['layout_centroid_avg'], bin['layout_normalize'])
        layout['coeffs_reg'] = layout_size_avg_residual(l_coeffs, bin['layout_coeffs_avg'])
        layout['ori_cls'], layout['ori_reg'] = ori_cls_reg(l_basis[1, :], bin, layout=True)
        layout_flip = dict()
        layout_flip['centroid_reg'] = layout_centroid_avg_residual(l_centroid, bin['layout_centroid_avg'], bin['layout_normalize'], flip=True)
        layout_flip['coeffs_reg'] = layout_size_avg_residual(l_coeffs, bin['layout_coeffs_avg'])
        layout_flip['ori_cls'], layout_flip['ori_reg'] = ori_cls_reg(l_basis[1, :], bin, layout=True, flip=True)
        # print layout['ori_cls'], layout_flip['ori_cls']
        # clean the ground truth
        with open(template_path, 'r') as f:
            size_template = pickle.load(f)
        f.close()
        boxes_out = list()
        boxes_out_flip = list()
        for box in boxes:
            box_set = dict()
            # box_set['ori_cls'], box_set['ori_reg'] = ori_cls_reg(box['3dbdb']['orientation'])
            box_set['ori_cls'], box_set['ori_reg'] = ori_cls_reg(box['3dbdb']['basis'][1, :], bin)
            # print box['3dbdb']['basis']
            # print basis_from_ori(num_from_bins(bin['ori_bin'], box_set['ori_cls'], box_set['ori_reg']))
            box_set['size_reg'] = size_avg_residual(box['3dbdb']['coeffs'][0], size_template, box['2dbdb']['classname'])
            box_set['bdb3d'] = get_corners_of_bb3d_no_index(box['3dbdb']['basis'], box['3dbdb']['coeffs'][0], box['3dbdb']['centroid'][0])
            box_set['x_cls'], box_set['x_reg'], box_set['y_cls'], box_set['y_reg'], box_set['z_cls'], box_set['z_reg'] = centroid_cls_reg(box['3dbdb']['centroid'][0], bin)
            box_set['bdb_pos'] = [box['2dbdb']['x1'], box['2dbdb']['y1'], box['2dbdb']['x2'], box['2dbdb']['y2']]
            box_set['bdb2d'] = [box['2dbdb']['x1'] / float(K[0, 2]), box['2dbdb']['y1'] / float(K[1, 2]), box['2dbdb']['x2'] / float(K[0, 2]), box['2dbdb']['y2'] / float(K[1, 2])]
            box_set['centroid_cls'], box_set['centroid_reg'] = bin_cls_reg(bin['centroid_bin'], np.linalg.norm(box['3dbdb']['centroid'][0]))
            delta_2d = list()
            delta_2d.append(((box_set['bdb_pos'][0] + box_set['bdb_pos'][2]) / 2 - box['projected_2d_center'][0][0]) / (box_set['bdb_pos'][2] - box_set['bdb_pos'][0]))
            delta_2d.append(((box_set['bdb_pos'][1] + box_set['bdb_pos'][3]) / 2 - box['projected_2d_center'][1][0]) / (box_set['bdb_pos'][3] - box_set['bdb_pos'][1]))
            box_set['delta_2d'] = delta_2d
            box_set['size_cls'] = OBJ_CATEGORY_CLEAN.index(box['2dbdb']['classname'])
            # print box_set['size_cls']
            # print box['2dbdb']['classname']
            boxes_out.append(box_set)
            # print box_set['3dbdb']['classname'], box_set['ori_cls'], box_set['ori_reg'], box_set['size_reg'], box_set['size_cls'], box_set['size_reg']
            # flip the boxes
            box_set_flip = dict()
            # box_set_flip['ori_cls'], box_set_flip['ori_reg'] = ori_cls_reg(box['3dbdb']['orientation'], flip=True)
            box_set_flip['ori_cls'], box_set_flip['ori_reg'] = ori_cls_reg(box['3dbdb']['basis'][1, :], bin, flip=True)
            box_set_flip['size_reg'] = size_avg_residual(box['3dbdb']['coeffs'][0], size_template, box['2dbdb']['classname'])
            box_set_flip['x_cls'], box_set_flip['x_reg'], box_set_flip['y_cls'], box_set_flip['y_reg'], box_set_flip['z_cls'], box_set_flip['z_reg'] = centroid_cls_reg(box['3dbdb']['centroid'][0], bin, flip=True)
            box_set_flip['centroid_cls'], box_set_flip['centroid_reg'] = bin_cls_reg(bin['centroid_bin'], np.linalg.norm(box['3dbdb']['centroid'][0]))
            box_set_flip['bdb_pos'] = [int(2 * K[0, 2] - box['2dbdb']['x2']), box['2dbdb']['y1'], int(2 * K[0, 2] - box['2dbdb']['x1']), box['2dbdb']['y2']]
            box_set_flip['bdb2d'] = [int(2 * K[0, 2] - box['2dbdb']['x2']) / float(K[0, 2]), box['2dbdb']['y1'] / float(K[1, 2]),
                                       int(2 * K[0, 2] - box['2dbdb']['x1']) / float(K[0, 2]), box['2dbdb']['y2'] / float(K[1, 2])]
            box_set_flip['size_cls'] = OBJ_CATEGORY_CLEAN.index(box['2dbdb']['classname'])
            coeffs_flip = size_from_template(box_set_flip['size_reg'], size_template, OBJ_CATEGORY_CLEAN[box_set_flip['size_cls']])
            centroid_flip = np.array([num_from_bins(bin['x_bin'], box_set_flip['x_cls'], box_set_flip['x_reg']), num_from_bins(bin['y_bin'], box_set_flip['y_cls'], box_set_flip['y_reg']), num_from_bins(bin['z_bin'], box_set_flip['z_cls'], box_set_flip['z_reg'])])
            basis_flip = basis_from_ori(num_from_bins(bin['ori_bin'], box_set_flip['ori_cls'], box_set_flip['ori_reg']))
            box_set_flip['bdb3d'] = get_corners_of_bb3d(basis_flip, coeffs_flip, centroid_flip)
            delta_2d_flip = [- delta_2d[0], delta_2d[1]]
            box_set_flip['delta_2d'] = delta_2d_flip
            # print box_set['delta_2d'], box_set_flip['delta_2d']
            boxes_out_flip.append(box_set_flip)
        if len(boxes_out) == 0:
            continue
        data = dict()
        data['rgb_path'] = op.join(PATH.metadata_root, 'images', '%06d.jpg' % (i+1))
        data['boxes'] = list_of_dict_to_dict_of_list(boxes_out)
        data['camera'] = camera
        data['layout'] = layout
        data['sequence_id'] = i + 1
        # fliped data
        data_flip = dict()
        data_flip['rgb_path'] = op.join(PATH.metadata_root, 'images', '%06d_flip.jpg' % (i+1))
        # img_flip = Image.open(data['rgb_path']).transpose(Image.FLIP_LEFT_RIGHT)
        # img_flip.save(data_flip['rgb_path'])
        data_flip['boxes'] = list_of_dict_to_dict_of_list(boxes_out_flip)
        data_flip['camera'] = camera_flip
        data_flip['layout'] = layout_flip
        data_flip['sequence_id'] = i + 1
        if shift:
            save_path = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i+1) + '_shift_5' + '.pickle')
            save_path_flip = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i+1) + '_flip' + '_shift_5' + '.pickle')
        else:
            save_path = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i + 1) + '.pickle')
            save_path_flip = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i + 1) + '_flip' + '.pickle')
        if (i + 1) <= 5050:
            test_path.append(save_path)
        else:
            train_path.append(save_path)
        with open(save_path, 'w') as f:
            pickle.dump(data, f)
        f.close()
        with open(save_path_flip, 'w') as f:
            pickle.dump(data_flip, f)
        f.close()
    print np.array(layout_centroid).mean(axis=0)
    print np.array(layout_coeffs).mean(axis=0)
    if not shift:
        with open(op.join(PATH.metadata_root, 'train.json'), 'w') as f:
            json.dump(train_path, f)
        f.close()
        with open(op.join(PATH.metadata_root, 'test.json'), 'w') as f:
            json.dump(test_path, f)
        f.close()
Example #35
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction=0.8

keras.backend.set_session(tf.Session(config=config))

import os
if os.path.isfile('BN_inception.h5'):
    bnincept = keras.models.load_model('BN_inception.h5')
else:
    bnincept = build_inception()

bnincept.summary()

keras.utils.plot_model(bnincept, to_file='BN_inception.png',
        show_shapes=True, show_layer_names=True)

#%%

chkpt = keras.callbacks.ModelCheckpoint('BN_inception.h5', period=1, save_best_only=True)
lrreduce = keras.callbacks.ReduceLROnPlateau(patience=3)
tfbord = keras.callbacks.TensorBoard(log_dir='BN_inception_logs')

callbacks = [chkpt, lrreduce, tfbord]
hist = bnincept.fit_generator(train_gen, epochs=40, validation_data=val_gen, validation_steps=1, callbacks=callbacks)

import pickle
with open('bn_hist', 'wb') as f:
    pickle.dump(hist, f)

# %%
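One caveat on the dump above: the Keras History callback keeps a reference to the model, so pickling hist directly can fail or drag the whole model along. A commonly used alternative is to pickle hist.history instead, which is a plain dict of per-epoch metric lists:

import pickle

with open('bn_hist', 'wb') as f:
    # hist.history is a plain dict of per-epoch metrics and pickles cleanly
    pickle.dump(hist.history, f)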
Example #36
        "evolution": "abra",
        "satk": 80
    },
    "snorlax": {
        "type": "normal",
        "name": "Snorlax",
        "dexn": 41,
        "hp": 160,
        "atk": 110,
        "deff": 65,
        "spd": 30,
        "xp": 189,
        "lvlev": 101,
        "floor": [3, 4],
        "evolution": "abra",
        "satk": 90
    }
}

pickle_out = open("pokemondata.pickle", "wb")
pickle.dump(pokemondata, pickle_out)
pickle_out.close()

pickle_out2 = open("pokemondata2.pickle", "wb")
pickle.dump(pokemondata2, pickle_out2)
pickle_out2.close()

pickle_out3 = open("pokemondata3.pickle", "wb")
pickle.dump(pokemondata3, pickle_out3)
pickle_out3.close()
Example #37
def process_data(input_data):
    vocabulary_size = 10000
    z = zipfile.ZipFile(input_data, "r")
    suffix = "txt"
    nlp = spacy.load("en")
    skip_window = 1
    num_samples = 2
    return_file =''
    # ---------initial process-----------------------------------------------------------------------------
    for filename in z.namelist():
        filename = str(filename)
        if filename.endswith(suffix):
            content = z.read(filename)
            document = nlp(content.decode('utf8'))
            for i in range(len(document)):
                if document[i].is_space:
                    continue
                # if document[i].string == "\n":
                #     continue
                if document[i].is_alpha:
                    if document[i].pos_ == 'ADJ':
                    # document[i].string.strip()
                        aa = document[i].string.lower().strip()
                        total_list.append(aa)
                if document[i].pos_ == 'NUM':
                    aa = 'n'
                    total_list.append(aa)
    # print(dictionary_total)

    for i in range(len(total_list)):
        if total_list[i] not in dictionary_fre.keys():
            dictionary_fre[total_list[i]] = 1
        elif total_list[i] in dictionary_fre.keys():
            dictionary_fre[total_list[i]] += 1
    # print(dictionary_fre)
    dic_sort = sorted(dictionary_fre.items(), key=lambda x: x[1], reverse=True)
    # print(dic_sort)
    # print("55555  ", len(dic_sort))
    # print(dic_sort)
    for j in range(len(dic_sort)):
        if j < vocabulary_size:
            dictionary[dic_sort[j][0]] = j
            reverse_dictionary[j] = dic_sort[j][0]
    # print("77777777777  " , len(dictionary))
    print(dictionary)
    # print("0000000000   ", len(reverse_dictionary))
    print(reverse_dictionary)

    # -----------data and lable-----------------------------------------------------
    z = zipfile.ZipFile(input_data, "r")
    for filename in z.namelist():
        filename = str(filename)
        if filename.endswith(suffix):
            # print("666666666")
            content = z.read(filename)
            document = nlp(content.decode('utf8'))
            for i in range(len(document)):
                # if document[i].string == "\n":
                #     continue
                if document[i].is_space:
                    continue
                if document[i].is_alpha and document[i].string.lower().strip() in dictionary:
                    ccc = document[i].string.lower().strip()
                    # print("66666666", dictionary[ccc])
                    # print(ccc)
                    # print("7777777")
                    aaa = document[i].children
                    child_count = 0
                    for j in aaa:
                        if j.string.lower().strip() in dictionary:
                            count.append(j.string.lower().strip())
                            child_count += 1
                    for m in range(skip_window):
                        b = i - (m + 1)
                        if document[b].is_alpha and document[b].string.lower().strip() in dictionary:
                            # print(1)
                            count.append(document[b].string.lower().strip())
                            child_count += 1
                        d = i + (m + 1)
                        if d <= len(document) - 1:
                            if document[d].is_alpha and document[d].string.lower().strip() in dictionary:
                                count.append(document[d].string.lower().strip())
                                child_count += 1
                    for s in range(num_samples):
                        if s >= len(count):
                            break
                        data.append(dictionary[ccc])
                        if dictionary[ccc] >3624:
                            print("4444444", dictionary[ccc])
                        # print(dictionary[ccc])
                        label.append(dictionary[count[s]])
                        if dictionary[count[s]] > 3624:
                            print(count[s])
                            print(dictionary[count[s]])
    # print("888888" , len(data))
    # print("99999999  ", len(label))
    print("81111111111",data)
    print()
    print("333333333", label)
    f = open("data.txt", "wb")
    pickle.dump((dictionary,reverse_dictionary,data,label) ,f)

    return_file = "data.txt"
    return return_file
Example #38
def run_test():
    print('Starting model test.....')
    model.eval()  # Set model to evaluate mode.

    list_loss = []
    list_qloss = []
    list_ploss = []
    list_minade2, list_avgade2 = [], []
    list_minfde2, list_avgfde2 = [], []
    list_minade3, list_avgade3 = [], []
    list_minfde3, list_avgfde3 = [], []
    list_minmsd, list_avgmsd = [], []

    list_dao = []
    list_dac = []

    for test_time_ in range(test_times):

        epoch_loss = 0.0
        epoch_qloss = 0.0
        epoch_ploss = 0.0
        epoch_minade2, epoch_avgade2 = 0.0, 0.0
        epoch_minfde2, epoch_avgfde2 = 0.0, 0.0
        epoch_minade3, epoch_avgade3 = 0.0, 0.0
        epoch_minfde3, epoch_avgfde3 = 0.0, 0.0
        epoch_minmsd, epoch_avgmsd = 0.0, 0.0
        epoch_agents, epoch_agents2, epoch_agents3 = 0.0, 0.0, 0.0

        epoch_dao = 0.0
        epoch_dac = 0.0
        dao_agents = 0.0
        dac_agents = 0.0

        H = W = 64
        with torch.no_grad():
            if map_version == '2.0':
                coordinate_2d = np.indices((H, W))
                coordinate = np.ravel_multi_index(coordinate_2d, dims=(H, W))
                coordinate = torch.FloatTensor(coordinate)
                coordinate = coordinate.reshape((1, 1, H, W))

                coordinate_std, coordinate_mean = torch.std_mean(coordinate)
                coordinate = (coordinate - coordinate_mean) / coordinate_std

                distance_2d = coordinate_2d - np.array([(H - 1) / 2, (H - 1) / 2]).reshape((2, 1, 1))
                distance = np.sqrt((distance_2d ** 2).sum(axis=0))
                distance = torch.FloatTensor(distance)
                distance = distance.reshape((1, 1, H, W))

                distance_std, distance_mean = torch.std_mean(distance)
                distance = (distance - distance_mean) / distance_std

                coordinate = coordinate.to(device)
                distance = distance.to(device)

            c1 = -decoding_steps * np.log(2 * np.pi)

            for b, batch in enumerate(data_loader):

                scene_images, log_prior, \
                agent_masks, \
                num_src_trajs, src_trajs, src_lens, src_len_idx, \
                num_tgt_trajs, tgt_trajs, tgt_lens, tgt_len_idx, \
                tgt_two_mask, tgt_three_mask, \
                decode_start_vel, decode_start_pos, scene_id, batch_size = batch

                # Detect dynamic batch size

                num_three_agents = torch.sum(tgt_three_mask)
                """
                if map_version == '2.0':
                    coordinate_batch = coordinate.repeat(batch_size, 1, 1, 1)
                    distance_batch = distance.repeat(batch_size, 1, 1, 1)
                    scene_images = torch.cat((scene_images.to(device), coordinate_batch, distance_batch), dim=1)
                """
                src_trajs = src_trajs.to(device)
                src_lens = src_lens.to(device)

                tgt_trajs = tgt_trajs.to(device)[tgt_three_mask]
                tgt_lens = tgt_lens.to(device)[tgt_three_mask]

                num_tgt_trajs = num_tgt_trajs.to(device)
                episode_idx = torch.arange(batch_size, device=device).repeat_interleave(num_tgt_trajs)[tgt_three_mask]

                agent_masks = agent_masks.to(device)
                agent_tgt_three_mask = torch.zeros_like(agent_masks)
                agent_masks_idx = torch.arange(len(agent_masks), device=device)[agent_masks][tgt_three_mask]
                agent_tgt_three_mask[agent_masks_idx] = True

                decode_start_vel = decode_start_vel.to(device)[agent_tgt_three_mask]
                decode_start_pos = decode_start_pos.to(device)[agent_tgt_three_mask]

                log_prior = log_prior.to(device)

                gen_trajs = model(src_trajs, src_lens, agent_tgt_three_mask, decode_start_vel, decode_start_pos, num_src_trajs, scene_images)

                gen_trajs = gen_trajs.reshape(num_three_agents, num_candidates, decoding_steps, 2)


                rs_error3 = ((gen_trajs - tgt_trajs.unsqueeze(1)) ** 2).sum(dim=-1).sqrt_()
                rs_error2 = rs_error3[..., :int(decoding_steps * 2 / 3)]

                diff = gen_trajs - tgt_trajs.unsqueeze(1)
                msd_error = (diff[:, :, :, 0] ** 2 + diff[:, :, :, 1] ** 2)

                num_agents = gen_trajs.size(0)
                num_agents2 = rs_error2.size(0)
                num_agents3 = rs_error3.size(0)

                ade2 = rs_error2.mean(-1)
                fde2 = rs_error2[..., -1]

                minade2, _ = ade2.min(dim=-1)
                avgade2 = ade2.mean(dim=-1)
                minfde2, _ = fde2.min(dim=-1)
                avgfde2 = fde2.mean(dim=-1)

                batch_minade2 = minade2.mean()
                batch_minfde2 = minfde2.mean()
                batch_avgade2 = avgade2.mean()
                batch_avgfde2 = avgfde2.mean()

                ade3 = rs_error3.mean(-1)
                fde3 = rs_error3[..., -1]

                msd = msd_error.mean(-1)
                minmsd, _ = msd.min(dim=-1)
                avgmsd = msd.mean(dim=-1)
                batch_minmsd = minmsd.mean()
                batch_avgmsd = avgmsd.mean()

                minade3, _ = ade3.min(dim=-1)
                avgade3 = ade3.mean(dim=-1)
                minfde3, _ = fde3.min(dim=-1)
                avgfde3 = fde3.mean(dim=-1)

                batch_minade3 = minade3.mean()
                batch_minfde3 = minfde3.mean()
                batch_avgade3 = avgade3.mean()
                batch_avgfde3 = avgfde3.mean()


                batch_loss = batch_minade3
                epoch_loss += batch_loss.item()
                batch_qloss = torch.zeros(1)
                batch_ploss = torch.zeros(1)

                print("Working on test {:d}/{:d}, batch {:d}/{:d}... ".format(test_time_ + 1, test_times, b + 1,
                                                                              len(data_loader)), end='\r')  # +

                epoch_ploss += batch_ploss.item() * batch_size
                epoch_qloss += batch_qloss.item() * batch_size
                epoch_minade2 += batch_minade2.item() * num_agents2
                epoch_avgade2 += batch_avgade2.item() * num_agents2
                epoch_minfde2 += batch_minfde2.item() * num_agents2
                epoch_avgfde2 += batch_avgfde2.item() * num_agents2
                epoch_minade3 += batch_minade3.item() * num_agents3
                epoch_avgade3 += batch_avgade3.item() * num_agents3
                epoch_minfde3 += batch_minfde3.item() * num_agents3
                epoch_avgfde3 += batch_avgfde3.item() * num_agents3

                epoch_minmsd += batch_minmsd.item() * num_agents3
                epoch_avgmsd += batch_avgmsd.item() * num_agents3

                epoch_agents += num_agents
                epoch_agents2 += num_agents2
                epoch_agents3 += num_agents3

                map_files = map_file(scene_id)
                output_files = [out_dir + '/' + x[2] + '_' + x[3] + '.jpg' for x in scene_id]

                cum_num_tgt_trajs = [0] + torch.cumsum(num_tgt_trajs, dim=0).tolist()
                cum_num_src_trajs = [0] + torch.cumsum(num_src_trajs, dim=0).tolist()

                src_trajs = src_trajs.cpu().numpy()
                src_lens = src_lens.cpu().numpy()

                tgt_trajs = tgt_trajs.cpu().numpy()
                tgt_lens = tgt_lens.cpu().numpy()

                # Positions (relative to the compacted arrays) where masked-out agents
                # must be re-inserted as zero rows by np.insert below
                zero_ind = np.nonzero(tgt_three_mask.numpy() == 0)[0]
                zero_ind -= np.arange(len(zero_ind))

                tgt_three_mask = tgt_three_mask.numpy()
                agent_tgt_three_mask = agent_tgt_three_mask.cpu().numpy()

                gen_trajs = gen_trajs.cpu().numpy()

                src_mask = agent_tgt_three_mask

                gen_trajs = np.insert(gen_trajs, zero_ind, 0, axis=0)

                tgt_trajs = np.insert(tgt_trajs, zero_ind, 0, axis=0)
                tgt_lens = np.insert(tgt_lens, zero_ind, 0, axis=0)

                for i in range(1):  # visualize only the first scene of each batch
                    candidate_i = gen_trajs[cum_num_tgt_trajs[i]:cum_num_tgt_trajs[i + 1]]
                    tgt_traj_i = tgt_trajs[cum_num_tgt_trajs[i]:cum_num_tgt_trajs[i + 1]]
                    tgt_lens_i = tgt_lens[cum_num_tgt_trajs[i]:cum_num_tgt_trajs[i + 1]]

                    src_traj_i = src_trajs[cum_num_src_trajs[i]:cum_num_src_trajs[i + 1]]
                    src_lens_i = src_lens[cum_num_src_trajs[i]:cum_num_src_trajs[i + 1]]
                    map_file_i = map_files[i]
                    output_file_i = output_files[i]

                    candidate_i = candidate_i[tgt_three_mask[cum_num_tgt_trajs[i]:cum_num_tgt_trajs[i + 1]]]
                    tgt_traj_i = tgt_traj_i[tgt_three_mask[cum_num_tgt_trajs[i]:cum_num_tgt_trajs[i + 1]]]
                    tgt_lens_i = tgt_lens_i[tgt_three_mask[cum_num_tgt_trajs[i]:cum_num_tgt_trajs[i + 1]]]

                    src_traj_i = src_traj_i[agent_tgt_three_mask[cum_num_src_trajs[i]:cum_num_src_trajs[i + 1]]]
                    src_lens_i = src_lens_i[agent_tgt_three_mask[cum_num_src_trajs[i]:cum_num_src_trajs[i + 1]]]

                    dao_i, dao_mask_i = dao(candidate_i, map_file_i)
                    dac_i, dac_mask_i = dac(candidate_i, map_file_i)

                    epoch_dao += dao_i.sum()
                    dao_agents += dao_mask_i.sum()

                    epoch_dac += dac_i.sum()
                    dac_agents += dac_mask_i.sum()

                    write_img_output(candidate_i, src_traj_i, src_lens_i, tgt_traj_i, tgt_lens_i, map_file_i,
                                     'test/img')
            print()  # move past the carriage-return progress line



        list_loss.append(epoch_loss / epoch_agents)

        # 2-Loss
        list_minade2.append(epoch_minade2 / epoch_agents2)
        list_avgade2.append(epoch_avgade2 / epoch_agents2)
        list_minfde2.append(epoch_minfde2 / epoch_agents2)
        list_avgfde2.append(epoch_avgfde2 / epoch_agents2)

        # 3-Loss
        list_minade3.append(epoch_minade3 / epoch_agents3)
        list_avgade3.append(epoch_avgade3 / epoch_agents3)
        list_minfde3.append(epoch_minfde3 / epoch_agents3)
        list_avgfde3.append(epoch_avgfde3 / epoch_agents3)

        list_minmsd.append(epoch_minmsd / epoch_agents3)
        list_avgmsd.append(epoch_avgmsd / epoch_agents3)

        list_dao.append(epoch_dao / dao_agents)
        list_dac.append(epoch_dac / dac_agents)


    test_ploss = [0.0, 0.0]
    test_qloss = [0.0, 0.0]
    test_loss = [np.mean(list_loss), np.std(list_loss)]

    test_minade2 = [np.mean(list_minade2), np.std(list_minade2)]
    test_avgade2 = [np.mean(list_avgade2), np.std(list_avgade2)]
    test_minfde2 = [np.mean(list_minfde2), np.std(list_minfde2)]
    test_avgfde2 = [np.mean(list_avgfde2), np.std(list_avgfde2)]

    test_minade3 = [np.mean(list_minade3), np.std(list_minade3)]
    test_avgade3 = [np.mean(list_avgade3), np.std(list_avgade3)]
    test_minfde3 = [np.mean(list_minfde3), np.std(list_minfde3)]
    test_avgfde3 = [np.mean(list_avgfde3), np.std(list_avgfde3)]

    test_minmsd = [np.mean(list_minmsd), np.std(list_minmsd)]
    test_avgmsd = [np.mean(list_avgmsd), np.std(list_avgmsd)]

    test_dao = [np.mean(list_dao), np.std(list_dao)]
    test_dac = [np.mean(list_dac), np.std(list_dac)]

    test_ades = (test_minade2, test_avgade2, test_minade3, test_avgade3)
    test_fdes = (test_minfde2, test_avgfde2, test_minfde3, test_avgfde3)

    print("--Final Performane Report--")
    print("minADE3: {:.5f}±{:.5f}, minFDE3: {:.5f}±{:.5f}".format(test_minade3[0], test_minade3[1], test_minfde3[0],
                                                                  test_minfde3[1]))
    print("avgADE3: {:.5f}±{:.5f}, avgFDE3: {:.5f}±{:.5f}".format(test_avgade3[0], test_avgade3[1], test_avgfde3[0],
                                                                  test_avgfde3[1]))
    print("DAO: {:.5f}±{:.5f}, DAC: {:.5f}±{:.5f}".format(test_dao[0] * 10000.0, test_dao[1] * 10000.0, test_dac[0],
                                                          test_dac[1]))
    with open(out_dir + '/metric.pkl', 'wb') as f:
        pkl.dump({"ADEs": test_ades,
                  "FDEs": test_fdes,
                  "Qloss": test_qloss,
                  "Ploss": test_ploss,
                  "DAO": test_dao,
                  "DAC": test_dac}, f)
Example #39
0
    if period_is_hp:
        pushMetricToDatabase("instant_watt_hp", currentConsumptionW(instant_intensity), True)
        pushMetricToDatabase("instant_watt_hc", 0, True)
    else:
        pushMetricToDatabase("instant_watt_hp", 0, True)
        pushMetricToDatabase("instant_watt_hc", currentConsumptionW(instant_intensity), True)

    ### Start of the month: update the following values in the InfluxDB database:
    ###     - current_month_percent : percentage consumed, in euros, relative to the provisional bill forecast by EDF
    ###     - month_eur : total amount, in euros, consumed for the current month
    ### Since consumption is still zero at this point, an "offset" equal to the EDF subscription fee is added
    if ( date.day == 1 and date.hour == 0 and date.minute == 1):
        stored_measures = pickle.load( open( "data.pickle", "rb" ) )
        pushMetricToDatabase(month_eur[dateToIndex(date.month)], stored_measures["monthly_eur"], False)
        stored_measures["monthly_eur"] = montly_cta_price_net + monthly_subscription_net
        pickle.dump( stored_measures, open( "data.pickle", "wb" ) )

    ### End of the day: update the following values in the InfluxDB database:
    ###     - daily_eur : electricity consumed during the day, in euros
    ###     - daily_kwh : electricity consumed during the day, in kWh
    ###     - current_month_percent : percentage consumed, in euros, relative to the provisional bill forecast by EDF
    ###     - month_eur : total amount, in euros, consumed for the current month
    if ( date.hour == 23 and date.minute == 59):
        stored_measures = pickle.load( open( "data.pickle", "rb" ) )        
        # Compute the daily consumption
        day_kwh_hc = current_kwh_hc - stored_measures["last_day_kwh_hc"]
        day_kwh_hp = current_kwh_hp - stored_measures["last_day_kwh_hp"]
        # Convert the electricity consumption into a monetary amount
        day_eur_hc = kwhToEurosHc(day_kwh_hc)
        day_eur_hp = kwhToEurosHp(day_kwh_hp)
        # Sum up the daily consumption
Example #40
0
def tokenize(texts, model_file, create_dictionnary, insy=False):

    # initialize variables

    if create_dictionnary:
        my_dictionary = {}
        my_dictionary["word_index"] = {}
        my_dictionary["index_word"] = {}
        my_dictionary["word_index"]["<PAD>"] = 0
        my_dictionary["index_word"][0] = "<PAD>"
        index = 0
    else:
        with open(model_file + ".index", 'rb') as handle:
            my_dictionary = pickle.load(handle)

    data = (np.zeros((len(texts), MAX_SEQUENCE_LENGTH))).astype('int32')

    i = 0

    # If indexsystem has been provided, make sure that it contains words for padding and target
    if insy:
        insy.word_to_index["<PAD>"] = 0
        insy.word_to_index["<TARGET>"] = len(insy.word_to_index)
        insy.index_to_word[0] = "<PAD>"
        insy.index_to_word.append("<TARGET>")
        if not "OOV" in insy.word_to_index:
            index = len(insy.word_to_index)
            insy.index_to_word.append("OOV")
            insy.word_to_index["OOV"] = index
        my_dictionary["word_index"] = insy.word_to_index
        my_dictionary["index_word"] = insy.index_to_word

    # Loop through the input sentences and vectorize them
    for line in texts:
        words = line.split()[:MAX_SEQUENCE_LENGTH]
        sentence_length = len(words)
        sentence = []
        for word in words:
            # If no indexsystem was provided, add new words to vocabulary as you encounter them
            if not insy:
                if word not in my_dictionary["word_index"].keys():
                    if create_dictionnary:
                        index += 1
                        my_dictionary["word_index"][word] = index
                        my_dictionary["index_word"][index] = word
                    else:
                        my_dictionary["word_index"][word] = my_dictionary[
                            "word_index"]["<PAD>"]
                sentence.append(my_dictionary["word_index"][word])
            # If indexsystem was provided, use that index mapping for vectorization
            else:
                if word in insy.word_to_index:
                    sentence.append(my_dictionary["word_index"][word])
                else:
                    sentence.append(my_dictionary["word_index"]["OOV"])

        # If sentence is shorter than other sentences in its batch, add zero-padding
        if sentence_length < MAX_SEQUENCE_LENGTH:
            for j in range(MAX_SEQUENCE_LENGTH - sentence_length):
                sentence.append(my_dictionary["word_index"]["<PAD>"])

        data[i] = sentence
        i += 1

    # Save indexation along with model
    if create_dictionnary:
        with open(model_file + ".index", 'wb') as handle:
            pickle.dump(my_dictionary,
                        handle,
                        protocol=pickle.HIGHEST_PROTOCOL)

    return my_dictionary, data
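The index built by tokenize is pickled next to the model so the exact same word-to-index mapping can be reused later. A minimal, self-contained sketch of that round trip with a toy vocabulary (the model_file prefix is a placeholder):

import pickle

model_file = "demo_model"  # placeholder prefix, as passed to tokenize(texts, model_file, ...)
my_dictionary = {"word_index": {"<PAD>": 0, "hello": 1, "world": 2},
                 "index_word": {0: "<PAD>", 1: "hello", 2: "world"}}

# Save the indexation alongside the model (create_dictionnary=True branch above)
with open(model_file + ".index", 'wb') as handle:
    pickle.dump(my_dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)

# Reload it for inference (create_dictionnary=False branch above)
with open(model_file + ".index", 'rb') as handle:
    restored = pickle.load(handle)
print(restored["word_index"]["world"])  # -> 2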
Example #41
0
def training():

    db = firestore.client()
    doc_ref = db.collection(u'ratings').where(u'book', u'==', True).stream()

    ref = db.collection_group(u'review')\
        .where(u'reviewed', u'==', True).order_by('user_id')
    docs = ref.stream()
    data1 = []  # collect the review documents as dicts
    for doc in docs:
        data1.append(doc.to_dict())
    # print(type(data), data)
    df = pd.DataFrame(data1)
    df1 = df[['user_id', 'book_id', 'rating']]
    # print(df1)

    ratingss = pd.read_csv('dataset/ratings.csv',
                           usecols=['user_id', 'book_id', 'rating'])
    # print(ratings.head(5))
    ratings = pd.concat([ratingss, df1])
    # print(new)
    ratings['user_id'] = ratings['user_id'].apply(str)

    n_users = ratings.user_id.unique().shape[0]
    n_books = ratings.book_id.unique().shape[0]
    print('Number of users = ' + str(n_users) +
          ' | Number of books = ' + str(n_books))

    Ratings = ratings.pivot(
        index='user_id', columns='book_id', values='rating').fillna(0)
    # Ratings.head()

    R = Ratings.to_numpy()
    user_ratings_mean = np.mean(R, axis=1)
    Ratings_demeaned = R - user_ratings_mean.reshape(-1, 1)

    sparsity = round(1.0 - len(ratings) / float(n_users * n_books), 3)
    # print ('The sparsity level of Goodbooks10k dataset is ' +  str(sparsity * 100) + '%')

    U, sigma, Vt = svds(Ratings_demeaned, k=50)

    sigma = np.diag(sigma)
    # sigma

    all_user_predicted_ratings = np.dot(
        np.dot(U, sigma), Vt) + user_ratings_mean.reshape(-1, 1)

    preds = pd.DataFrame(all_user_predicted_ratings, columns=Ratings.columns)

    reader = Reader()

    # Load ratings dataset with Dataset library
    data = Dataset.load_from_df(
        ratings[['user_id', 'book_id', 'rating']], reader)

    # Split the dataset for 5-fold evaluation
    kf = KFold(n_splits=5)
    svd = SVD()

    for trainset, testset in kf.split(data):

        # train and test algorithm.
        svd.fit(trainset)
        predictions = svd.test(testset)

        # Compute and print Root Mean Squared Error
        accuracy.rmse(predictions, verbose=True)

    trainset = data.build_full_trainset()
    svd.fit(trainset)
    print('Data Trained Successfully')
    with open('model_pickle', 'wb') as f:
        pickle.dump(svd, f)
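Once the Surprise SVD model has been pickled to 'model_pickle', it can be reloaded and queried without retraining. A minimal sketch of the load-and-predict side, assuming training() above has already written the file; the user and book ids are hypothetical placeholders:

import pickle

with open('model_pickle', 'rb') as f:
    svd = pickle.load(f)

# Surprise's predict() takes raw user and item ids and returns a Prediction
# whose .est field is the estimated rating
prediction = svd.predict(uid='314', iid=42)
print(prediction.est)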
Example #42
0
def get_matches(summonerName, summonerCount):

    # Break Recursion
    if summonerCount == maxSummonerCount:
        return
    summonerCount += 1

    #Get Current Summoner Info
    print("~~~~~GETTING MATCHES FOR SUMMONER: " + summonerName + "~~~~~")
    summoner = cass.get_summoner(name=summonerName)
    current_history = summoner.match_history.filter(
        lambda match: match.queue == cass.data.Queue.ranked_solo_fives)

    #Load Existing Summoners List from summonerFile
    try:
        summonerFile = open(summonerFileName, 'rb')
        summonerDict = pickle.load(summonerFile)
        summonerFile.close()
    except:
        summonerFile = open(summonerFileName, 'wb')
        summonerFile.close()
        summonerDict = {}
    #Load Existing matches List from matchFile
    try:
        matchFile = open(matchFileName, 'rb')
        matches = pickle.load(matchFile)
        matchFile.close()
    except:
        matchFile = open(matchFileName, 'wb')
        matchFile.close()
        matches = []

    #Collect Matches & Summoners
    x = 0
    for match in current_history:
        # Match Iteration Management
        if (x == maxMatchCount):
            break
        print("(" + str(x + 1) + " Matches saved) Match id#" + str(match.id) +
              " being added...")
        x += 1
        time.sleep(sleepTime)

        #Add the match to the matchlist
        matches.append(match)

        #Add the summoners to summonerList
        summonerDict[str(match.participants[0].summoner.name)] = 0
        summonerDict[str(match.participants[1].summoner.name)] = 0
        summonerDict[str(match.participants[2].summoner.name)] = 0
        summonerDict[str(match.participants[3].summoner.name)] = 0
        summonerDict[str(match.participants[4].summoner.name)] = 0
        summonerDict[str(match.participants[5].summoner.name)] = 0
        summonerDict[str(match.participants[6].summoner.name)] = 0
        summonerDict[str(match.participants[7].summoner.name)] = 0
        summonerDict[str(match.participants[8].summoner.name)] = 0
        summonerDict[str(match.participants[9].summoner.name)] = 0

    summonerDict[str(summoner.name)] = 1

    # Write matchFile
    matchFile = open(matchFileName, 'wb')
    pickle.dump(matches, matchFile)
    matchFile.close()

    # Write summonerFile
    summonerFile = open(summonerFileName, 'wb')
    pickle.dump(summonerDict, summonerFile)
    summonerFile.close()

    # Select Random Summoner
    summonerDict2 = {}

    for key in summonerDict.keys():
        if summonerDict[key] == 0:
            summonerDict2[key] = 0
    summonerList = list(summonerDict2)

    randomSummonerName = summonerList[randint(0, len(summonerList) - 1)]

    get_matches(randomSummonerName, summonerCount)
Example #43
0
                RMSlist_MMFF.append(RMS_MMFF)
                pred_mmff.append(c.GetPositions())
            tpred_mmff.extend(pred_mmff)
            ttest_mmff.extend(RMSlist_MMFF)
        except:
            continue

    # save results per molecule
    if args.savepermol:
        mol_info = {'n_heavy_atoms': n_est, 'n_rot_bonds': n_rot_bonds}
        if len(ttest_mmff) > 0:
            mol_info["mmff"] = np.array(ttest_mmff)
            mol_info["pred_mmff"] = np.stack(tpred_mmff)
        if len(ttest_uff) > 0:
            mol_info["uff"] = np.array(ttest_uff)
        pkl.dump(mol_info, \
            open(os.path.join(dir_name, "mols", 'mol_{}.p'.format(t)), 'wb'))

    if len(ttest_uff) > 0:
        mean_ttest, std_ttest = np.mean(ttest_uff, 0), np.std(ttest_uff, 0)
        uff.append([mean_ttest, std_ttest, len(ttest_uff)])

    if len(ttest_mmff) > 0:
        mean_ttest, std_ttest = np.mean(ttest_mmff, 0), np.std(ttest_mmff, 0)
        mmff.append([mean_ttest, std_ttest, len(ttest_mmff)])

print ("Done!")
print ("UFF results")
print (np.mean(np.array(uff)[:,0]), np.mean(np.array(uff)[:,1]))
print ("MMFF results")
print (np.mean(np.array(mmff)[:,0]), np.mean(np.array(mmff)[:,1]))
Example #44
0
def createDataFile(list, division):
    path = "./Data/image.dat"

    with open(path, 'wb') as f:
        pickle.dump(list, f)
Example #45
0
def saveTangles(tangleObj, filename):
    fp = open(filename, "wb")
    pickle.dump(tangleObj, fp)
    fp.close()
    return True
Example #46
0
                f.write("Current iteration: {} \n".format(iteration))
                f.close()

    train_agent(n_iterations=n_iterations)

    # c) For storing frames
    def get_vid_frames(policy, filename, num_episodes=100, fps=2):
        frames = []
        for _ in range(num_episodes):
            time_step = tf_env.reset()
            frames.append(np.abs(env.get_board()))
            while not time_step.is_last():
                action_step = policy.action(time_step)
                time_step = tf_env.step(action_step.action)
                frames.append(np.abs(env.get_board()))
        return frames

    # Store Data
    df = pd.DataFrame(np.array(training_info).T,
                      columns=['N_Ep', 'Env_Steps', 'Avf_RM', 'Avg_EPLM'])
    df.to_csv('../DATA/Single/stats_{}.txt'.format(II), index=False, mode="a")

    # Store Frames
    frames = get_vid_frames(agent.policy, "trained-agent")
    with open('../DATA/Single/frames_{}.pkl'.format(II), 'wb') as f:
        pickle.dump(frames, f)

    # Store Model
    my_policy = agent.policy
    saver = PolicySaver(my_policy, batch_size=None)
    saver.save('../DATA/Single/policy_{}'.format(II))
Example #47
0
def Open_URL(
        url='',
        post_type='get',
        payload={},
        headers={
            'User-Agent':
            'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
        },
        cookies=True,
        auth=None,
        timeout=None,
        cookiejar=None,
        proxies={}):
    """
If you need to pull the contents of a webpage it's very simple to do so by using this function.
This uses the Python Requests module; for more detailed info on how the params work,
please look at the following link: http://docs.python-requests.org/en/master/user/advanced/

IMPORTANT: This function will attempt to convert a url with a query string into the
correct params for a post or get command but I highly recommend sending through your
query string as a dictionary using the payload param. It's much cleaner and a
safer way of doing things; if you send through your url with a query string attached
then I take no responsibility if it doesn't work!

CODE:   Open_URL(url,[post_type,payload,headers,cookies,auth,timeout,cookiejar])

AVAILABLE PARAMS:

    url  -  This is the main url you want to send through. Send it through
    in query string format even if it's a post.

    post_type  -  By default this is set to 'get' but it can be set to 'post';
    if set to post, the query string will be split up into a post format automatically.
    
    payload - By default this is not used but if you just want a standard
    basic Open_URL function you can add a dictionary of params here. If you
    don't enter anything in here the function will just split up your url
    accordingly. Make sure you read the important information at the top
    of this tutorial text.

    headers -  Optionally send through headers in form of a dictionary.

    cookies  -  If set to true your request will send through and store cookies.

    auth  -  User/pass details

    timeout  -  Optionally set a timeout for the request.

    cookiejar  -  A name for the location to store cookies. By default it's
    set to addon_data/<addon_id>/cookies/cookiejar, but if you access multiple
    websites you may want to use a separate filename for each site.

    proxies  -  Use a proxy for accessing the link; see the requests documentation for full
    information, but essentially you would send through a dictionary like this:
    proxies = {"http":"http://10.10.1.10:3128","https":"https://10.10.1.10:3128"}

EXAMPLE CODE:
dialog.ok('[COLOR gold]OPEN FORUM PAGE[/COLOR]','We will attempt to open the noobsandnerds forum page and return the contents. You will now be asked for your forum credentials.')
myurl = 'http://noobsandnerds.com/support/index.php'
username = koding.Keyboard('ENTER USERNAME')
password = koding.Keyboard('ENTER PASSWORD')
params = {"username":username,"password":password}
xbmc.log(repr(params),2)
url_contents = koding.Open_URL(url=myurl, payload=params, post_type='get')
koding.Text_Box('CONTENTS OF WEB PAGE',url_contents)
~"""
    import os
    import pickle
    import requests
    import sys
    import xbmc
    import xbmcaddon

    from __init__ import converthex, dolog, Encryption, ADDON_ID, LOGIN, FORUM, USERNAME, PASSWORD, KODI_VER
    from addons import Addon_Info
    from filetools import Text_File

    dolog('POST TYPE: %s' % post_type)
    dolog('url: %s' % url)
    Addon_Version = Addon_Info(id='version')
    Addon_Profile = xbmc.translatePath(Addon_Info(id='profile'))
    Cookie_Folder = os.path.join(Addon_Profile, 'cookies')
    if not os.path.exists(Cookie_Folder):
        os.makedirs(Cookie_Folder)

    if cookiejar is None:
        Cookie_Jar = os.path.join(Cookie_Folder, 'cookiejar')
    else:
        Cookie_Jar = os.path.join(Cookie_Folder, cookiejar)

    my_cookies = None
    if cookies:
        if os.path.exists(Cookie_Jar):
            try:
                with open(Cookie_Jar, 'rb') as f:
                    my_cookies = pickle.load(f)
            except:
                my_cookies = None

    # If the payload is empty we split the params
    if len(payload) == 0:
        dolog('###### QUERY STRING CONVERSION MODE')

        # If the url sent through is not http then we presume it's hitting the NaN page
        if not url.startswith(converthex('68747470')):
            NaN_URL = True
            args = url
            post_type = 'post'
            url = converthex(
                '687474703a2f2f6e6f6f6273616e646e657264732e636f6d2f43505f53747566662f6c6f67696e5f74657374696e672e7068703f753d257326703d257326663d257326613d257326763d2573266b3d257326653d2573'
            ) % (USERNAME, PASSWORD, FORUM, ADDON_ID, Addon_Version, KODI_VER,
                 args)
        else:
            NaN_URL = False
        if '?' in url:
            url, args = url.split('?')
            args = args.split('&')
            for item in args:
                var, data = item.split('=')
                if NaN_URL:
                    payload[var] = Encryption('e', data)
                else:
                    payload[var] = data

    dolog('PAYLOAD: %s' % payload)

    try:
        if post_type == 'post':
            r = requests.post(url,
                              payload,
                              headers=headers,
                              cookies=my_cookies,
                              auth=auth,
                              timeout=timeout,
                              proxies=proxies)
        else:
            r = requests.get(url,
                             payload,
                             headers=headers,
                             cookies=my_cookies,
                             auth=auth,
                             timeout=timeout,
                             proxies=proxies)
    except:
        dolog('Failed to pull content for %s' % url)
        return False
    dolog('### CODE: %s   |   REASON: %s' % (r.status_code, r.reason))
    if r.status_code >= 200 and r.status_code < 400:
        content = r.text.encode('utf-8')
        dolog('content: %s' % content)
        if cookies:
            with open(Cookie_Jar, 'wb') as f:
                pickle.dump(r.cookies, f)
        return content
    else:
        dolog('Failed to pull content for %s' % url)
        return False
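Open_URL keeps a session alive between calls by pickling the requests cookie jar and reloading it before the next request. A minimal, self-contained sketch of that pattern with no network access (the file name is a placeholder; the real code builds a path under the addon's profile folder):

import pickle
import requests

cookie_jar_path = "cookiejar"  # placeholder path

# Store a cookie jar (Open_URL does this with r.cookies after a successful request)
jar = requests.cookies.RequestsCookieJar()
jar.set("session", "abc123", domain="example.com", path="/")
with open(cookie_jar_path, 'wb') as f:
    pickle.dump(jar, f)

# Reload it and hand it back to requests via the cookies= argument
with open(cookie_jar_path, 'rb') as f:
    my_cookies = pickle.load(f)
print(my_cookies.get("session", domain="example.com"))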
Example #48
0
def _save_model(filepath, vectorizer, classifiers):
  with open(filepath, 'wb') as f:
    pickle.dump([vectorizer, classifiers], f)
Example #49
0
 def _check_and_load(self, ext, img, f, verbose=True):
     if not os.path.isfile(f) or ext.find('reset') >= 0:
         if verbose:
             print('Making a binary: {}'.format(f))
         with open(f, 'wb') as _f:
             pickle.dump(imageio.imread(img), _f)
Example #50
0
    def indexed_input(Tx_inout, public_key, amt, index_map):
        if public_key not in index_map:
            index_map[public_key] = 0
        Tx_inout.add_input(public_key, amt)
        index_map[public_key] = index_map[public_key] + 1
    
###################Tangle Genesis#####################
    Tx1 = Tx()
    indexed_input(Tx1, pu1, 1, pu_indeces)
    Tx1.sign(pr1)

    if Tx1.is_valid():
        print("Success! Tx is valid")

    savefile = open("tx.dat", "wb")
    pickle.dump(Tx1, savefile)
    savefile.close()

    loadfile = open("tx.dat", "rb")
    newTx = pickle.load(loadfile)

    if newTx.is_valid():
        print("Success! Loaded tx is valid")
    loadfile.close()

    root = TxBlock(None,None)
    root.addTx(Tx1)
    B1 = root
    start = time.time()
    print(B1.find_nonce())
    elapsed = time.time() - start
Example #51
0
CLUSTERS = set(INPUT.iloc[:, 0])
DATAPOINTS = set(INPUT.values.reshape(INPUT.shape[0] * INPUT.shape[1]))

ClustInd = {}
for ii, cluster in enumerate(CLUSTERS):
    ClustInd[cluster] = ii

GenomesInd = {}
for ii, genome in enumerate(GENOMES):
    GenomesInd[genome] = ii

TABLE = np.zeros([len(GENOMES), len(CLUSTERS)]).astype('int')

for ii in INPUT.index:

    #    print(str(ii)+'/'+str(INPUT.shape[0]))

    cluster = INPUT.iloc[ii, 0]
    genome = INPUT.iloc[ii, 1].split('$')[0]

    TABLE[GenomesInd[genome],
          ClustInd[cluster]] = TABLE[GenomesInd[genome], ClustInd[cluster]] + 1

OUTPUT = pd.DataFrame(index=GENOMES, columns=CLUSTERS, data=TABLE)

print('Done!')

print('Saving output ...')
pickle.dump(OUTPUT, open(OUTPUTFOLDER + '/GeneCount.p', 'wb'), protocol=4)
print('Done!')
Example #52
0
 def save(self, filename):
     """Write out the current kvstore to a pickle file"""
     with open(filename, 'wb') as f:  # pickle needs a binary-mode file
         pickle.dump(self.data, f)
Example #53
0
def poly(data, label, n_folds=10, scale=True, exclude=[],
         feature_selection=False, save=True, scoring='auc',
         project_name='', concurrency=1, verbose=True):
    '''
    Input
    data         = numpy matrix with as many rows as samples
    label        = numpy vector that labels each data row
    n_folds      = number of folds to run
    scale        = whether to scale data or not
    exclude      = list of classifiers to exclude from the analysis
    feature_selection = whether to use feature selection or not (anova)
    save         = whether to save intermediate steps or not
    scoring      = Type of score to use ['auc', 'f1']
    project_name = prefix used to save the intermediate steps
    concurrency  = number of parallel jobs to run
    verbose      = whether to print or not results

    Output
    scores       = matrix with scores for each fold and classifier
    confusions   = confusion matrix for each classifier
    predictions  = cross-validated predictions for each classifier
    '''

    assert label.shape[0] == data.shape[0], \
        "Label dimensions do not match data number of rows"
    _le = LabelEncoder()
    label = _le.fit_transform(label)
    n_class = len(np.unique(label))

    if save and not os.path.exists('poly_{}/models'.format(project_name)):
        os.makedirs('poly_{}/models'.format(project_name))

    if not verbose:
        logger.setLevel(logging.ERROR)
    logger.info('Building classifiers ...')
    classifiers = build_classifiers(exclude, scale, feature_selection,
                                    data.shape[1])

    scores = pd.DataFrame(columns=pd.MultiIndex.from_product(
        [classifiers.keys(), ['train', 'test']]),
        index=range(n_folds))
    predictions = pd.DataFrame(columns=classifiers.keys(),
                               index=range(data.shape[0]))
    test_prob = pd.DataFrame(columns=classifiers.keys(),
                             index=range(data.shape[0]))
    confusions = {}

    logger.info('Initialization, done.')

    kf = list(StratifiedKFold(label, n_folds=n_folds, random_state=1988))

    # Parallel processing of tasks
    manager = Manager()
    args = manager.list()
    args.append({})  # Store inputs
    shared = args[0]
    shared['kf'] = kf
    shared['X'] = data
    shared['y'] = label
    args[0] = shared

    args2 = []
    for clf_name, val in classifiers.items():
        for n_fold in range(n_folds):
            args2.append((args, clf_name, val, n_fold, project_name,
                          save, scoring))

    if concurrency == 1:
        result = list(starmap(fit_clf, args2))
    else:
        pool = Pool(processes=concurrency)
        result = pool.starmap(fit_clf, args2)
        pool.close()

    fitted_clfs = {key: [] for key in classifiers}

    # Gather results
    for clf_name in classifiers:
        temp = np.zeros((n_class, n_class))
        temp_pred = np.zeros((data.shape[0], ))
        temp_prob = np.zeros((data.shape[0], ))
        clfs = fitted_clfs[clf_name]
        for n in range(n_folds):
            train_score, test_score, prediction, prob, confusion,\
                fitted_clf = result.pop(0)
            clfs.append(fitted_clf)
            scores.loc[n, (clf_name, 'train')] = train_score
            scores.loc[n, (clf_name, 'test')] = test_score
            temp += confusion
            temp_prob[kf[n][1]] = prob
            temp_pred[kf[n][1]] = _le.inverse_transform(prediction)

        confusions[clf_name] = temp
        predictions[clf_name] = temp_pred
        test_prob[clf_name] = temp_prob

    # Voting
    fitted_clfs = pd.DataFrame(fitted_clfs)
    scores['Voting', 'train'] = np.zeros((n_folds, ))
    scores['Voting', 'test'] = np.zeros((n_folds, ))
    temp = np.zeros((n_class, n_class))
    temp_pred = np.zeros((data.shape[0], ))
    for n, (train, test) in enumerate(kf):
        clf = PolyVoter(fitted_clfs.loc[n].values)
        X, y = data[train, :], label[train]
        scores.loc[n, ('Voting', 'train')] = _scorer(clf, X, y)
        X, y = data[test, :], label[test]
        scores.loc[n, ('Voting', 'test')] = _scorer(clf, X, y)
        temp_pred[test] = clf.predict(X)
        temp += confusion_matrix(y, temp_pred[test])

    confusions['Voting'] = temp
    predictions['Voting'] = temp_pred
    test_prob['Voting'] = temp_pred
    ######

    # saving confusion matrices
    if save:
        with open('poly_' + project_name + '/confusions.pkl', 'wb') as f:
            p.dump(confusions, f, protocol=2)

    if verbose:
        print(scores.astype('float').describe().transpose()
              [['mean', 'std', 'min', 'max']])
    return scores, confusions, predictions, test_prob
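A minimal sketch of how poly might be called with toy data, following its docstring; the import path is an assumption (adjust it to wherever the module above lives), and the toy inputs are placeholders:

import numpy as np
from polyssifier import poly  # assumed location of the module above

data = np.random.rand(100, 20)         # 100 samples, 20 features (toy data)
label = np.random.randint(0, 2, 100)   # binary class labels

scores, confusions, predictions, test_prob = poly(
    data, label, n_folds=5, scale=True, exclude=[],
    feature_selection=False, save=False, scoring='auc',
    project_name='demo', concurrency=1, verbose=True)

print(scores.astype('float').describe())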
Example #54
0
def generate_data(save_lumps_pos=False, show_images=False, pause_images=False,
                  discrete_centers=False, dataset_name="lumpy_dataset", lumps_version=0,
                  num_samples=100, number_first_patient=0, cut_edges_margin=None):
    """Generate num_samples lumpy images for label 0 and 1, save them, and possibly plot them."""
    print("Samples generated for each label: " + str(num_samples))

    if lumps_version != 0:
        dataset_name += "_v{}".format(lumps_version)

    if show_images:
        plt.ion()

    # Save or show data
    percent = 1
    split_distance = num_samples * percent // 100
    split_distance = 1 if split_distance < 1 else split_distance
    params0 = get_params_label_0(version=lumps_version, discrete_positions=discrete_centers)
    params1 = get_params_label_1(version=lumps_version, discrete_positions=discrete_centers)
    volumes = []
    labels = []
    patients = []
    masks = []
    centers = []
    patient_counter = number_first_patient
    print("{}. 0% loaded (0/{} samples)".format(get_current_time(), num_samples))
    for i in range(num_samples):
        # Save lumpy images for label 0 and 1
        image0, lumps, background, pos_lumps0 = get_lumpy_image(*params0)
        mask0 = generate_mask(image0, params0[-1])
        if cut_edges_margin is not None:
            image0, mask0 = remove_healthy_top_and_bottom_slices(image0, mask0, cut_edges_margin)
        volumes.append(image0)
        masks.append(mask0)
        labels.append(0)
        patients.append("{:08d}".format(patient_counter))
        patient_counter += 1
        image1, lumps, background, pos_lumps1 = get_lumpy_image(*params1)
        mask1 = generate_mask(image1, params1[-1])
        if cut_edges_margin is not None:
            image1, mask1 = remove_healthy_top_and_bottom_slices(image1, mask1, cut_edges_margin)
        volumes.append(image1)
        masks.append(mask1)
        labels.append(1)
        patients.append("{:08d}".format(patient_counter))
        patient_counter += 1

        # Only create matrix with lumps centers if we are going to save it
        if save_lumps_pos:
            # Save all matrices with lumps centers for label 0 and 1
            pos_matrix0 = create_lumps_pos_matrix(lumps_pos=pos_lumps0, dim=params0[0])
            pos_matrix1 = create_lumps_pos_matrix(lumps_pos=pos_lumps1, dim=params1[0])
            centers.append(pos_matrix0)
            centers.append(pos_matrix1)

        # Create and show plots
        if show_images:
            num0 = image0.shape[2]
            num1 = image1.shape[2]
            middle0 = int(num0 / 2)
            middle1 = int(num1 / 2)
            if save_lumps_pos:
                fig = plt.figure(0)
                ax = fig.add_subplot(2, 3, 1)
                ax.imshow(pos_matrix0[:, :, middle0].T)
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 0 - Centers Slice {}/{}".format(middle0, num0))
                ax = fig.add_subplot(2, 3, 2)
                ax.imshow(image0[:, :, middle0])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 0 - Slice {}/{}".format(middle0, num0))
                ax = fig.add_subplot(2, 3, 3)
                ax.imshow(masks[-2][:, :, middle0])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 0 - Mask Slice {}/{}".format(middle0, num0))
                ax = fig.add_subplot(2, 3, 4)
                ax.imshow(pos_matrix1[:, :, middle1].T)
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 1 - Centers Slice {}/{}".format(middle1, num1))
                ax = fig.add_subplot(2, 3, 5)
                ax.imshow(image1[:, :, middle1])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 1 - Slice {}/{}".format(middle1, num1))
                ax = fig.add_subplot(2, 3, 6)
                ax.imshow(masks[-1][:, :, middle1])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 1 - Mask Slice {}/{}".format(middle1, num1))
            else:
                fig = plt.figure(0)
                ax = fig.add_subplot(2, 2, 1)
                ax.imshow(image0[:, :, middle0])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 0 - Slice {}/{}".format(middle0, num0))
                ax = fig.add_subplot(2, 2, 2)
                ax.imshow(masks[-2][:, :, middle0])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 0 - Mask Slice {}/{}".format(middle0, num0))
                ax = fig.add_subplot(2, 2, 3)
                ax.imshow(image1[:, :, middle1])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 1 - Slice {}/{}".format(middle1, num1))
                ax = fig.add_subplot(2, 2, 4)
                ax.imshow(masks[-1][:, :, middle1])
                ax.set_yticks([])
                ax.set_xticks([])
                ax.set_title("Label 1 - Mask Slice {}/{}".format(middle1, num1))
            plt.pause(0.00001)
            # If pause images is not set, we will see the images briefly one after another
            if pause_images:
                s = input("Press ENTER to see the next image, or Q (q) to disable pause:  ")
                if len(s) > 0 and s[0].lower() == "q":
                    pause_images = False

        if (i + 1) % split_distance == 0:
            print("{}. {}% loaded ({}/{} samples)".format(get_current_time(),
                                                          (i + 1) * 100 // num_samples,
                                                          i + 1, num_samples))
        elif num_samples > 8192 and i % 64 == 63:
            print("{}. {}% loaded ({}/{} samples)".format(get_current_time(),
                                                          (i + 1) * 100 // num_samples,
                                                          i + 1, num_samples))

    if show_images:
        plt.ioff()

    print(" ")
    print("Saving data, this may take a few minutes")
    # Save the volumes
    margin_suffix = "" if cut_edges_margin is None else "_m{}".format(cut_edges_margin)
    file_suffix = "{}_{}-{}".format(margin_suffix, number_first_patient,
                                    number_first_patient + num_samples)
    with open('{}{}_images.pkl'.format(dataset_name, file_suffix), 'wb') as f:
        pickle.dump(volumes, f)
    print("Data saved in '{}{}_images.pkl'.".format(dataset_name, file_suffix))

    with open('{}{}_labels.pkl'.format(dataset_name, file_suffix), 'wb') as f:
        pickle.dump(labels, f)
    print("Data saved in '{}{}_labels.pkl'.".format(dataset_name, file_suffix))

    with open('{}{}_patients.pkl'.format(dataset_name, file_suffix), 'wb') as f:
        pickle.dump(patients, f)
    print("Data saved in '{}{}_patients.pkl'.".format(dataset_name, file_suffix))

    with open('{}{}_masks.pkl'.format(dataset_name, file_suffix), 'wb') as f:
        pickle.dump(masks, f)
    print("Data saved in '{}{}_masks.pkl'.".format(dataset_name, file_suffix))

    if save_lumps_pos:
        centers = np.array(centers)
        np.save(dataset_name + "_centers", centers)
        print("Lumps centers saved in '{}.npy'.".format(dataset_name + "_centers"))

    if cut_edges_margin is None:
        print(" ")
        print("Dataset shape:    {}".format(np.array(volumes).shape))
        print("Dataset range:    {} - {}".format(np.array(volumes).min(), np.array(volumes).max()))
        print("Dataset median:   {}".format(np.median(volumes)))
        print("Dataset mean:     {}".format(np.mean(volumes)))
        print("Dataset std dev:  {}".format(np.std(volumes)))
        print("Labels shape:     {}".format(np.array(labels).shape))
        print("Labels available: {}".format(np.array(labels).shape))
        print("Patients range:   {} - {}".format(patients[0], patients[-1]))
        print("Masks shape:      {}".format(np.array(masks).shape))
        print("Masks range:      {} - {}".format(np.array(masks).min(), np.array(masks).max()))
        print("Masks median:     {}".format(np.median(masks)))
        print("Masks mean:       {}".format(np.mean(masks)))
        print("Masks std dev:    {}".format(np.std(masks)))
Example #55
0
def eaAlphaMuPlusLambdaCheckpoint(population,
                                  toolbox,
                                  mu,
                                  cxpb,
                                  mutpb,
                                  ngen,
                                  stats=None,
                                  halloffame=None,
                                  pf=None,
                                  nelite=3,
                                  cp_frequency=1,
                                  cp_filename=None,
                                  continue_cp=False,
                                  selection='selNSGA2',
                                  td=None):
    print(halloffame, pf)
    gen_vs_pop = []

    if continue_cp:
        # A file name has been given, then load the data from the file
        cp = pickle.load(open(cp_filename, "rb"))
        population = cp["population"]
        parents = cp["parents"]
        start_gen = cp["generation"]
        halloffame = cp["halloffame"]
        logbook = cp["logbook"]
        history = cp["history"]
        random.setstate(cp["rndstate"])
    else:
        # Start a new evolution
        start_gen = 1
        parents = population[:]
        gen_vs_pop.append(population)
        logbook = deap.tools.Logbook()
        logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
        history = deap.tools.History()

        # TODO this first loop should be not be repeated !
        invalid_ind = _evaluate_invalid_fitness(toolbox, population)
        invalid_count = len(invalid_ind)
        gen_vs_hof = []
        halloffame, pf = _update_history_and_hof(halloffame, pf, history,
                                                 population, td)

        gen_vs_hof.append(halloffame)
        _record_stats(stats, logbook, start_gen, population, invalid_count)
    # Begin the generational process
    for gen in range(start_gen + 1, ngen + 1):
        offspring = _get_offspring(parents, toolbox, cxpb, mutpb)

        assert len(offspring) > 0
        population = parents + offspring
        gen_vs_pop.append(population)

        invalid_count = _evaluate_invalid_fitness(toolbox, offspring)
        halloffame, pf = _update_history_and_hof(halloffame, pf, history,
                                                 population, td)
        _record_stats(stats, logbook, gen, population, invalid_count)
        set_ = False

        if str('selIBEA') == selection:
            toolbox.register("select", tools.selIBEA)
            set_ = True
        if str('selNSGA') == selection:
            toolbox.register("select", selNSGA2)
            set_ = True
        assert set_ == True

        elite = _get_elite(halloffame, nelite)
        gen_vs_pop.append(copy.copy(population))
        parents = toolbox.select(population, mu)

        logger.info(logbook.stream)

        if (cp_filename):  # and cp_frequency and
            #gen % cp_frequency == 0):
            cp = dict(population=population,
                      generation=gen,
                      parents=parents,
                      halloffame=halloffame,
                      history=history,
                      logbook=logbook,
                      rndstate=random.getstate())
            pickle.dump(cp, open(cp_filename, "wb"))
            print('Wrote checkpoint to %s' % cp_filename)
            logger.debug('Wrote checkpoint to %s', cp_filename)

        unique_values = [p.dtc.attrs.values() for p in population]
        print(unique_values, 'what the hell genes')
        assert len(unique_values) == len(set(unique_values))

        #print(set(gen_vs_pop[-1][0].dtc.attrs.values()) in set(population[0].dtc.attrs.values()))

    return population, halloffame, pf, logbook, history, gen_vs_pop
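The checkpointing above pickles a plain dict holding the population, generation counter, parents, logbook and RNG state, so a run can resume exactly where it stopped. A minimal, self-contained sketch of that save/restore pattern with a toy population and a placeholder file name:

import pickle
import random

cp_filename = "checkpoint.pkl"  # placeholder path

# Save side (mirrors the cp dict built above)
cp = dict(population=[[0.1, 0.2], [0.3, 0.4]],  # toy individuals
          generation=7,
          parents=[[0.1, 0.2]],
          rndstate=random.getstate())
with open(cp_filename, "wb") as f:
    pickle.dump(cp, f)

# Resume side (mirrors the continue_cp branch above)
with open(cp_filename, "rb") as f:
    cp = pickle.load(f)
population = cp["population"]
start_gen = cp["generation"]
random.setstate(cp["rndstate"])  # continue with the same random stream
print("resuming at generation", start_gen, "with", len(population), "individuals")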
Example #56
0
def save_data_object(path, data_object):
    with open(path, 'wb') as file_data:
        pickle.dump(data_object, file_data)  # object first, then the open file handle
Example #57
0
 def save(self, path, values, name):
     f = open(path + dataset + '/' + name + '.pkl', 'wb')
     pickle.dump(values, f)
     f.close()
Example #58
0
import pickle
dc1 = {"Apple": 1, "Oranges": 2}
dc2 = {"Car": 2, "Bike": 1}

out = {}  # avoid shadowing the built-in dict
out["Fruits"] = dc1
out["Vehicle"] = dc2

with open("conf", "wb") as f:
    pickle.dump(out, f)

with open("conf", "rb") as f:
    x = pickle.load(f)
print(x)
Example #59
0
File: note.py Project: rscv5/tframe
 def save(self, file_name):
     self._check_before_dump()
     with open(file_name, 'wb') as f:
         pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
Example #60
0
def train_model():
    with open('intents.json') as json_data:
        intents = json.load(json_data)

    words = []  #Design the Vocabulary (unique words)
    classes = []
    documents = []
    ignore_words = ['?']
    # loop through each sentence in our intents patterns
    for intent in intents['intents']:
        for pattern in intent['patterns']:
            # tokenize each word in the sentence
            w = nltk.word_tokenize(pattern)
            # add to our words list
            words.extend(w)
            # add to documents in our corpus
            documents.append((w, intent['tag']))
            # add to our classes list
            if intent['tag'] not in classes:
                classes.append(intent['tag'])

    stemmer = LancasterStemmer()

    # stem and lower each word and remove duplicates
    words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
    words = sorted(list(set(words)))

    # remove duplicates
    classes = sorted(list(set(classes)))

    # create our training data
    training = []

    # create an empty array for our output
    output_empty = [0] * len(classes)

    # training set, bag of words for each sentence
    for doc in documents:
        # initialize our bag of words
        bag = []
        # list of tokenized words for the pattern (pattern = what user says)
        pattern_words = doc[0]
        # stem each word
        pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
        # create our bag of words array
        # mark the presence of words as a boolean value, 0 for absent, 1 for present.
        for w in words:
            bag.append(1) if w in pattern_words else bag.append(0)

        # output is a '0' for each tag and '1' for current tag
        output_row = list(output_empty)
        output_row[classes.index(doc[1])] = 1

        training.append([bag, output_row])

    # shuffle our features and turn into np.array
    random.shuffle(training)
    training = np.array(training)

    # create train and test lists
    train_x = list(training[:, 0])
    train_y = list(training[:, 1])

    # reset underlying graph data
    tf.reset_default_graph()
    # Build neural network
    net = tflearn.input_data(shape=[None, len(train_x[0])])
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, 8)
    net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
    net = tflearn.regression(net)

    # Define model and setup tensorboard
    model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
    # Start training (apply gradient descent algorithm)
    model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
    model.save('model.tflearn')

    # save all of our data structures
    import pickle
    pickle.dump(
        {
            'words': words,
            'classes': classes,
            'train_x': train_x,
            'train_y': train_y
        }, open("training_data", "wb"))


#train_model()
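The data structures pickled to "training_data" above are what a companion inference script would restore before rebuilding the same network. A minimal sketch of the reload side, assuming train_model() has already been run in the current directory:

import pickle

with open("training_data", "rb") as f:
    data = pickle.load(f)

words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']
print(len(words), "stemmed words,", len(classes), "intent classes")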