Example no. 1
	def __init__(self, tlc=None, order=1, torder=0, directory=None, **kwargs):
		# initialize the Instrument, using the extra columns in the light curve

		# collect the default parameter values, keyed by name
		# (avoid shadowing the builtin ``dict``)
		defaults = {}

		defaults['rescaling'] = 1.0

		# a constant out-of-transit flux level
		try:
			defaults['C'] = np.median(tlc.flux)
		except (AttributeError, TypeError):
			# no light curve was provided; fall back to a unit flux level
			defaults['C'] = 1.0

		try:
			# include terms of (ev)^power for all the external variables available
			for power in np.arange(order)+1:
				for evkey in tlc.externalvariables.keys():
					defaults[evkey + '_tothe{0:1d}'.format(power)] = 0.0

			# include (t)^power terms for time (CURRENTLY SET UP ONLY FOR SINGLE TRANSITS!)
			for power in np.arange(torder)+1:
				defaults['t_tothe{0:1d}'.format(power)] = 0.0

			# include other custom instrument parameters
			for k in kwargs.keys():
				defaults[k] = kwargs[k]
		except AttributeError:
			# tlc has no external variables (or is None); skip these terms
			pass

		# include all the parameters explicitly defined here
		Parameters.__init__(self, directory=directory, **defaults)
Example no. 2
	def __init__(self, **kwargs):

		# define defaults
		Parameters.__init__(self,
									J=0.0,
									k=0.1,
									rsovera=1.0/10.0,
									b=0.0,
									period=10.0,
									t0=2456000.0,
									dt=0.0,
									semiamplitude=0.0,
									esinw=0.0,
									ecosw=0.0)

		# overwrite defaults with input keywords
		Parameters.__init__(self, **kwargs)

		# set up the parameter constraints
		self.J.parinfo['limited'] = [True, False]
		self.J.parinfo['limits'] = [0, 100]

		self.b.parinfo['limited'] = [True, True]
		self.b.parinfo['limits'] = [0.0, 1.0]

		#self.q.parinfo['limited'] = [True, False]
		#self.q.parinfo['limits'] = [0, 100]

		self.period.parinfo['limited'] = [True, False]
		self.period.parinfo['limits'] = [0, 100]

		self.t0.parinfo['limited'] = [True, False]
		self.t0.parinfo['limits'] = [0, 1000000000]
Example no. 3
def extractFullMIToThesaurus():
	accents = Accents()
	parameters = Parameters()
	max_qty_terms = parameters.getMaxQtyTerms()
	seeds = Seeds()
	dic_seeds = seeds.getSeeds()
	mi_file = Statistic(stat_temp+'IMT_FullStatisticalCorpus.txt')

	try:
		thesaurus_file = codecs.open('../Data/Output/T3/T3_Jaccard.xml', 'w', 'utf-8')
	except IOError:
		print 'ERROR: System cannot open the file ../Data/Output/T3/T3_Jaccard.xml'
		sys.exit()

	thesaurus_file.write('<?xml version="1.0" encoding="ISO-8859-1"?>\n<thesaurus>\n\t<ontology id="privacy">\n')
	for seed in dic_seeds:
		qty_terms = 0
		dic_related = mi_file.getOrderedNounMIForTerm(seed)
		if dic_related != False:
			thesaurus_file.write('\t\t<seed term_id="" term_name="'+accents.buildAccents(seed)+'" type="">\n')
			for mi_related in dic_related:
				if qty_terms < max_qty_terms:
					thesaurus_file.write('\t\t\t<term id="" display="ON" similarity="'+mi_related[0]+'">'+accents.buildAccents(mi_related[1])+'</term>\n')
					qty_terms += 1
			thesaurus_file.write('\t\t</seed>\n')
	thesaurus_file.write('\t</ontology>\n</thesaurus>')
	thesaurus_file.close()
Example no. 4
File: Star.py Project: zkbt/transit
	def __init__(self, **kwargs):
		Parameters.__init__(self, jitter=0.0, u1=0.2, u2=0.6, gd=0.32, albedo=0, mass=0.25)
		Parameters.__init__(self, **kwargs)

		self.u1.parinfo['limited'] = [True, True]
		self.u1.parinfo['limits'] = [0, 1]

		self.u2.parinfo['limited'] = [True, True]
		self.u2.parinfo['limits'] = [0, 1]
Example no. 5
	def initialize(self):
		problem = Problem()
		parameters = Parameters()
		problem_type = parameters.problem_type
		low_bound, high_bound = parameters.set_bounds(problem_type)
		self.position = np.random.uniform(low_bound, high_bound, parameters.dimension)
		self.velocity = np.random.uniform(parameters.min_vel, parameters.max_vel, parameters.dimension)
		self.cost = problem.cost_function(self.position)
		self.best_position = self.position[:]
		self.best_cost = self.cost
Example no. 6
def main(argv):

	parameters = Parameters(argv)
	hostname = parameters.getHostname()
	port = parameters.getPort()
	dbname = parameters.getDBName()
	language_1, language_2 = parameters.getLanguage()
	collection = parameters.getCollection()
	filexml_1 = parameters.getInputFile_1()
	filexml_2 = parameters.getInputFile_2()
	type_corpus = parameters.getType()

	print 'Using parameters of configuration: '
	print '- Host : ',hostname
	print '- Port : ',port
	print '- Coll : ',collection
	print '- DBase: ',dbname
	print '- XML1 : ',filexml_1
	print '- XML2 : ',filexml_2

	database = Mongo(hostname, dbname, collection)	

	dic_content_1 = OrderedDict()
	parserxml_1 = XML(filexml_1, language_1)
	dic_content_1 = parserxml_1.getContent()
	size_1 = len(dic_content_1)
	del parserxml_1

	dic_content_2 = OrderedDict()
	parserxml_2 = XML(filexml_2, language_2)
	dic_content_2 = parserxml_2.getContent()
	size_2 = len(dic_content_2)
	del parserxml_2
	
	counter = 1
	if size_1 == size_2:
		#As both files come from WebAligner, they must have the same number of documents
		for id_order in dic_content_1:
			id_file_1 = dic_content_1[id_order]['id_file']
			language_1 = dic_content_1[id_order]['language']
			content_1 = dic_content_1[id_order]['content']
			
			id_file_2 = dic_content_2[id_order]['id_file']
			language_2 = dic_content_2[id_order]['language']
			content_2 = dic_content_2[id_order]['content']

			if database.exists(language_1, id_file_1):
				if not database.exists(language_2, id_file_2):
					database.insertInExisting(language_1, id_file_1, language_2, id_file_2, content_2)
			else:
				if database.exists(language_2, id_file_2):
					database.insertInExisting(language_2, id_file_2, language_1, id_file_1, content_1)
				else:
					database.insertNewData(language_1, id_file_1, content_1, language_2, id_file_2, content_2, type_corpus, counter)
			counter += 1
	else:
		#Files have different number of documents, so they are not aligned
		print '\nError: Files not aligned. Please align them with WebAligner.'
Example no. 7
def main(argv):
	parameters = Parameters(argv)
	hostname = parameters.getHostname()
	port = parameters.getPort()
	dbname = parameters.getDBName()
	language_1, language_2 = parameters.getLanguage()
	collection = parameters.getCollection()
	fileinput_1 = parameters.getInputFile_1()
	fileinput_2 = parameters.getInputFile_2()
	type_corpus = parameters.getType()

	print 'Using parameters of configuration: '
	print '- Host : ',hostname
	print '- Port : ',port
	print '- Coll : ',collection
	print '- DBase: ',dbname
	print '- File1: ',fileinput_1
	print '- File2: ',fileinput_2

	database = Mongo(hostname, dbname, collection)	

	id_file_1 = (fileinput_1.split('/'))[-1]
	id_file_2 = (fileinput_2.split('/'))[-1]

	try:
		file_1 = codecs.open(fileinput_1, 'r', 'utf-8')
	except IOError:
		print 'ERROR: System cannot open the '+fileinput_1+' file'
		sys.exit(2)
	try:
		file_2 = codecs.open(fileinput_2, 'r', 'utf-8')
	except IOError:
		print 'ERROR: System cannot open the '+fileinput_2+' file'
		sys.exit(2)
	
	#Sentences indexed by the number of the line : number_line = _id (sentence)
	line_number = 1
	lines_2 = file_2.readlines()

	for counter, content_1 in enumerate(file_1):
		content_2 = lines_2[counter]

		if not database.exists(language_1, id_file_1) and not database.exists(language_2, id_file_2):
			database.insertNewData(language_1, id_file_1, content_1, language_2, id_file_2, content_2, type_corpus, line_number)
		else:
			if database.existsSentence(language_1, id_file_1, line_number):
				if not database.existsSentence(language_2, id_file_2, line_number):
					database.insertInExistingSentence(language_1, id_file_1, language_2, id_file_2, content_2, line_number)
			else:
				if database.existsSentence(language_2, id_file_2, line_number):
					database.insertInExistingSentence(language_2, id_file_2, language_1, id_file_1, content_1, line_number)
				else:
					database.insertNewSentence(language_1, id_file_1, content_1, language_2, id_file_2, content_2, line_number)
		if (line_number % 1000 == 0):
			print 'Indexing line: ',line_number
		line_number += 1
Example no. 8
	def __init__(self):
		self.corpus_folder = '../Data/Corpus/Raw/'
		self.full_corpus = '../Data/Corpus/Statistical/Full/'
		self.noun_corpus = '../Data/Corpus/Statistical/Noun/'
		self.parameters = Parameters()
		self.firstLoadFile = True
		self.__buildStatisticalCorpus__()
		command = "cat "+self.full_corpus+"*.txt >> "+self.full_corpus+"../FullStatisticalCorpus.txt"
		os.system(command)
		command = "cat "+self.noun_corpus+"*.txt >> "+self.full_corpus+"../NounStatisticalCorpus.txt"
		os.system(command)
Example no. 9
def generate_dataset(san,
                     train_prep,
                     train_ds,
                     test_prep,
                     test_ds,
                     gen_path,
                     train_id="train",
                     test_id="test",
                     max_epoch=0,
                     addParamFn=None,
                     phys=1,
                     san_acts=1,
                     san_phys=1):
    """
    Generate a new dataset for each epoch
    :param san: the sanitizer model
    :param train_prep: the preprocessing core for the train set
    :param train_dl: the train dataloader
    :param test_prep: the preprocessing core for the test set
    :param test_dl: the test data loader
    :param gen_path: the path where to save the generated dataset
    :param train_id: the id of the train set
    :param test_id: the id of the test set
    :param epoch: the current epoch
    :param addParamFn: function to add parameters in name
    """
    bs = 1024
    train_dl = data.DataLoader(train_ds,
                               batch_size=bs,
                               shuffle=False,
                               num_workers=1)
    test_dl = data.DataLoader(test_ds,
                              batch_size=bs,
                              shuffle=False,
                              num_workers=1)
    name = lambda n, e: "{}/{}_{}.csv".format(gen_path, n, addParamFn(e))

    def _generate_(san,
                   dataloader,
                   dset,
                   prep,
                   phys=phys,
                   san_acts=san_acts,
                   san_phys=san_phys):
        df = pd.DataFrame()
        select = lambda x, y, c: x if c else y
        cat = lambda x, ax: x if ax is None else torch.cat((ax, x), 0)
        a_xs, a_p, a_a, a_s, a_uid = None, None, None, None, None
        for i, sample in enumerate(dataloader):
            x = sample["sensor"].to(DEVICE)
            noise = NOISE(r=x.shape[0]).to(DEVICE)
            a = sample["act"].to(DEVICE)
            p = sample["phy"].to(DEVICE) * phys
            other_data = torch.cat((p, CTF(a) * san_acts, noise), 1)
            s = sample["sens"]
            xs, acs, ps = san(x, other_data)

            a = select(acs, a, san_acts == 1)
            p = select(ps, p, san_phys == 1)
            a_xs = cat(xs, a_xs)
            a_p = cat(p, a_p)
            a_a = cat(a, a_a)
            a_s = cat(s, a_s)
            a_uid = cat(sample["uid"], a_uid)

        df = dset.__inverse_transform_conv__(sensor_tensor=a_xs,
                                             phy=a_p,
                                             act_tensor=a_a,
                                             sens_tensor=a_s,
                                             user_id_tensor=a_uid,
                                             trials=dset.trials,
                                             cpu_device=CPU_DEVICE)
        p = prep.copy(True)
        p.df = df.reset_index(drop=True)
        p.inverse_transform()
        return p.df

    for epoch in tqdm.tqdm(range(1, max_epoch + 1)):
        # Load the sanitizer Model
        M.load_classifier_state(san,
                                epoch,
                                P.ModelsDir(),
                                ext="S",
                                otherParamFn=P.ParamFunction)
        # Check if there is a generated data in the correct format
        if not tryReading(name(train_id, epoch)):
            # Set does not exist yet
            df = _generate_(san, train_dl, train_ds, train_prep)
            df.to_csv(name(train_id, epoch), index=False)
        if not tryReading(name(test_id, epoch)):
            # Set does not exist yet
            df = _generate_(san, test_dl, test_ds, test_prep)
            df.to_csv(name(test_id, epoch), index=False)
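
For context, here is a minimal sketch (not taken from the project above) of the per-epoch file naming and "skip it if a readable CSV already exists" pattern that generate_dataset relies on. The gen_path folder, the addParamFn lambda and the tryReading helper below are hypothetical stand-ins for objects the example assumes to be defined elsewhere.

import os
import pandas as pd

gen_path = "./generated"                            # hypothetical output folder
addParamFn = lambda e: "epoch{}".format(e)          # hypothetical name builder
name = lambda n, e: "{}/{}_{}.csv".format(gen_path, n, addParamFn(e))

def tryReading(path):
    # return True only if the file exists and parses as a CSV
    try:
        pd.read_csv(path)
        return True
    except (OSError, pd.errors.ParserError):
        return False

os.makedirs(gen_path, exist_ok=True)
for epoch in range(1, 4):
    if not tryReading(name("train", epoch)):
        # in the real code, the sanitized dataframe would be generated here
        pd.DataFrame({"epoch": [epoch]}).to_csv(name("train", epoch), index=False)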
Example no. 10
    default=508,
    help='give number of hidden units as an integer | (default = 508)')
parser.add_argument('--epochs',
                    type=int,
                    default=1,
                    help='give number of epochs as an integer | (default = 1)')
parser.add_argument('--gpu',
                    type=str,
                    default='cuda',
                    help='cuda or cpu | (default = cuda)')

arg_in = parser.parse_args()
parameters = Parameters({
    "arch": arg_in.arch,
    "epochs": arg_in.epochs,
    "gpu": arg_in.gpu,
    "hidden_units": arg_in.hidden_units,
    "learningrate": arg_in.learningrate,
    "save_directory": arg_in.save_directory
})

# Function that greets the user
greetUser()
# Display the parameters parsed from the command line arguments
parameters.displayParameters('training')
query = command("Please Enter to continue")
print('Do you want to modify input values?')
while True:
    print('‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡‡')
    print("## What you Want modify?                 ##")
    print("## 0 : modify a architecture             ##")
    print("## 1 : modify a learningrate             ##")
Example no. 11
class StatisticalCorpus:

	def __init__(self):	
		self.corpus_folder = '../Data/Corpus/Raw/'
		self.full_corpus = '../Data/Corpus/Statistical/Full/'
		self.noun_corpus = '../Data/Corpus/Statistical/Noun/'
		self.parameters = Parameters()
		self.firstLoadFile = True
		self.__buildStatisticalCorpus__()
		command = "cat "+self.full_corpus+"*.txt >> "+self.full_corpus+"../FullStatisticalCorpus.txt"
		os.system(command)
		command = "cat "+self.noun_corpus+"*.txt >> "+self.full_corpus+"../NounStatisticalCorpus.txt"
		os.system(command)

	def __buildStatisticalCorpus__(self):
		try:
			root, dirs, files = os.walk(self.corpus_folder).next()[:3]
		except:
			print 'ERROR: It was not possible to open the ../Data/Corpus/Raw/ folder'
			sys.exit()

		accents = Accents()
		for corpus_file in files:
			if re.match('.*xml$', corpus_file):
				corpus_filename = corpus_file.split('.')[0]
				xmlfile = ParseXml(root+''+corpus_file)
				dic_terms = xmlfile.getDicTerms()
				dic_nouns = xmlfile.getNouns()
				dic_verbs = xmlfile.getVerbs()

				id_sentence = 1
				id_word = 1
				id_t = 's'+str(id_sentence)+'_'+str(id_word)

				string_full = ''
				string_nouns = ''
				while dic_terms.has_key(id_t):
					while dic_terms.has_key(id_t):
						if not re.match('^(pu|num|conj|art|prp|spec)', dic_terms[id_t]['pos']) and (re.search('[$]', dic_terms[id_t]['lemma']) is None) and (len(dic_terms[id_t]['lemma']) >= self.parameters.getMinWordSize()):
							lemma = accents.buildCodes(dic_terms[id_t]['lemma'])
							if dic_nouns.has_key(id_t):
								string_nouns += lemma+'__N '
								string_full += lemma+'__N '
							elif dic_verbs.has_key(id_t):
								string_nouns += lemma+'__V '
								string_full += lemma+'__V '
							else:
								string_full += lemma+'__O '
							string_nouns = string_nouns.replace('-', '_')
							string_full = string_full.replace('-', '_')
						id_word += 1
						id_t = 's'+str(id_sentence)+'_'+str(id_word)
					id_word = 1
					id_sentence += 1
					id_t = 's'+str(id_sentence)+'_'+str(id_word)
				self.__writeCorpusFile__(corpus_filename, string_full, string_nouns)		

	def __writeCorpusFile__(self, corpus_filename, string_full, string_nouns):
		try:
			write_full = codecs.open(self.full_corpus+''+corpus_filename+'.txt', 'w', 'utf-8')
		except IOError:
			print 'ERROR: System cannot open the '+self.full_corpus+''+corpus_filename+'.txt file'
			sys.exit()
		try:
			write_nouns = codecs.open(self.noun_corpus+''+corpus_filename+'.txt', 'w','utf-8')
		except IOError:
			print 'ERROR: System cannot open the '+self.noun_corpus+''+corpus_filename+'.txt file'
			sys.exit()
		write_full.write(string_full)
		write_nouns.write(string_nouns)
		write_full.close()
		write_nouns.close()
Example no. 12
 def __init__(self):
     Parameters.__init__(self)
     self.registerFloatParameter(
         "gravity", "X-component of gravitational acceleration")
Example no. 13
def main(type_atc, argv):
	list_relations = ['AN', 'SV', 'VO']

	date_start = datetime.datetime.now()
	date_start = date_start.strftime("%Y-%m-%d %H:%M:%S")

	parameters = Parameters(type_atc, argv)
	contexts = parameters.getContexts()
	svd_dimension = int(parameters.getSvdDimension())
	input_folder = parameters.getInputFolder()
	language = parameters.getLanguage()
	min_word_size = parameters.getMinWordSize()
	max_qty_terms = int(parameters.getMaxQtyTerms())
	output_folder = parameters.getOutputFolder()
	temp_folder = parameters.getTempFolder()
	record_log = parameters.getRecordLog()
	record_intermediate = parameters.getRecordIntermediate()
	seeds_file = parameters.getSeedsFile()
	stoplist_file = parameters.getStoplistFile()
	sim_measure = parameters.getSimilarityMeasure()
	del parameters

	logfile = LogFile(record_log, str(date_start), svd_dimension, input_folder, language, stoplist_file, min_word_size, max_qty_terms, None, output_folder, None, temp_folder, seeds_file, sim_measure)

	#if contexts:
	#	logfile.writeLogfile('- Building syntactics relations from '+temp_folder)
	#	contexts = Contexts(temp_folder)
	#	del contexts
	#else:
	#	logfile.writeLogfile('- Building syntactics relations from '+input_folder)
	#	ling_corpus = StanfordSyntacticContexts(input_folder, temp_folder, stoplist_file, min_word_size, record_intermediate)
	#	del ling_corpus

	matrix_relation = Matrix(temp_folder, svd_dimension, record_intermediate)		
	del matrix_relation

	#similarities = Similarities(seeds_file, temp_folder, 'cosine')
	#dic_topn = similarities.getTopNOrderedDic(10)
	#del Similarities
	
	#logfile.writeLogfile('- Building thesaurus in '+output_folder+'T_'+type_atc+'_'+sim_measure+'.xml')
	#thesaurus = Thesaurus(output_folder+'T_'+type_atc+'_'+sim_measure+'.xml',max_qty_terms)
	#thesaurus.write(dic_topn)
	#del thesaurus

	date_end = datetime.datetime.now()
	date_end = date_end.strftime("%Y-%m-%d %H:%M:%S")
	logfile.writeLogfile('- Thesaurus successfully built!\nEnding process at: '+str(date_end)+'.\n')
	del logfile
Example no. 14
class Slave: 
    def __init__(self, name, index=1, host='localhost', port=8080, debug=False, param_file=None, folder_loc='', delimeter=':::', cameras=12, acceptable_filetype_set=['.JPG','CR2'], destination=None):
        self.name = name
        self.debug = debug
        self.index = index
        self.port = port
        self.host = host
        self.delimeter = delimeter
        self.client = Client(host=host, port=port, debug=debug, delimeter=self.delimeter)
        self.param_file = param_file
        if self.param_file:
            self.writeParamHeaders()
            self.writeParamFilenames()
            self.closeParam()


        #functions that revolve around other instances
        self.cameras=cameras
        self.acceptable_filetype_set=acceptable_filetype_set

        self.folder_loc = folder_loc
        self.folder = Folder(self.folder_loc)
        self.checkFolders()

        self.client.connect()

    def __del__(self): 
        if self.client:
            self.client.close()
        
        if os.path.exists(self.folder_loc):
            shutil.rmtree(self.folder_loc)

    '''Param File Helper Functions'''
    def setParameters(self, name, destination):
        self.param_file = Parameters(name=self.name,destination=destination)
        self.writeParamHeaders()
        self.writeParamFilenames()
        self.closeParam()

    def writeParamHeaders(self):
        #if self.debug: 
        #    print "writeHeaders" 
        self.param_file.write("Name" + str(self.delimeter) + str(self.name) + "\n")
        self.param_file.write("Port" + str(self.delimeter) + str(self.port) + "\n")
        self.param_file.write("Host" + str(self.delimeter) + str(self.host) + "\n")
        self.param_file.write("Parameter File Location" + str(self.delimeter) + str(abspath(join(self.param_file.getDestination(), self.name + self.param_file.getFileType()))) + "\n")
    
    def writeParamFilenames(self): 
        #if self.debug:
        #    print "writeParamFilenames"
        file_list = self.getFilenames(self.folder)

        for f in file_list:
            self.param_file.write("File" + str(self.delimeter) + str(f) + "\n")
    
    def closeParam(self):
        self.param_file.close()

    def setFolderLocation(self, folder_loc):
        self.folder_loc = folder_loc
            
    def getFilenames(self, folder, file_list=[]):
        for file in folder.files:
            if file.type.upper() in self.acceptable_filetype_set:
                file_list.append(file.path)
        for folder in folder.folders: 
            file_list = self.getFilenames(folder=folder, file_list=file_list)
        return file_list

    def getFilenamesFolders(self, folder, folder_list=[]):
        for file in folder.files:
            if file.type.upper() in self.acceptable_filetype_set:
                if folder not in folder_list:
                    folder_list.append(folder)
        for folder in folder.folders: 
            file_list = self.getFilenamesFolders(folder=folder, folder_list=folder_list)
        return folder_list

    def checkData(self, folder, bad_data=[]):
        data_ok = True
        for data_type in self.acceptable_filetype_set:
            count_types = 0
            for file in folder.files:
                if file.type.upper() == data_type:
                    count_types += 1
            if (count_types != self.cameras) and (count_types > 0): 
                if [count_types, folder.path] not in bad_data:
                    data_ok = False
                    bad_data.append([count_types, folder.path])
        for folder in folder.folders: 
            self.checkData(folder=folder, bad_data=bad_data)
        return bad_data

    def moveTree(self, sourceRoot, destRoot):
        if not exists(destRoot):
            return False
        ok = True
        for path, dirs, files in walk(sourceRoot):

            relPath = relpath(path, sourceRoot)
            destPath = join(destRoot, relPath)
            if not exists(destPath):
                makedirs(destPath)
            for file in files:
                destFile = join(destPath, file)
                if isfile(destFile):
                    print "Skipping existing file: " + join(relPath, file)
                    remove(join(sourceRoot, file))
                    ok = False
                    continue
                srcFile = join(path, file)
                #print "rename", srcFile, destFile
                rename(srcFile, destFile)
        for path, dirs, files in walk(sourceRoot, False):
            if len(files) == 0 and len(dirs) == 0:
                rmdir(path)
        return ok

    def checkFolders(self):
        folders = self.getFilenamesFolders(self.folder)
        for folder in folders:
            bad_data = self.checkData(folder)
        while bad_data != []:
            if (len(bad_data)%2 == 0):
                print "Data problem in two folders..."
                data_set_1 = bad_data.pop()
                data_set_2 = bad_data.pop()
                size_of_datas = data_set_1[0]+data_set_2[0]
                if size_of_datas == self.cameras: 
                    print "Data appears to be fixable..."
                    self.moveTree(data_set_1[1], data_set_2[1])
                    print "Files moved, problem resolved between: " 
                    print data_set_1[1] 
                    print data_set_2[1]
            else:
                print "Corrupt folders are not aligned... Please manually inspect them."
                # bad_data is not modified in this branch, so continuing would loop forever
                break
        self.folder = Folder(self.folder_loc)
                
    def run(self):
        self.client.sendParamFile(self.param_file.param_path)
        self.client.run()

    def close(self):
        self.client.close()
Example no. 15
class Runner(object):
    """
    A runner is a utilitary tool that allows to create environment, and run simulations more easily.
    This specific class as for main purpose to evaluate the performance of a trained :class:`grid2op.Agent`, rather
    than to train it. Of course, it is possible to adapt it for a specific training mechanisms. Examples of such
    will be made available in the future.

    Attributes
    ----------
    envClass: ``type``
        The type of the environment used for the game. The class should be given, and **not** an instance (object)
        of this class. The default is :class:`grid2op.Environment`. If modified, it should derive from this class.

    actionClass: ``type``
        The type of action that can be performed by the agent / bot / controller. The class should be given, and
        **not** an instance of this class. This type should derive from :class:`grid2op.Action`. The default is
        :class:`grid2op.TopologyAction`.

    observationClass: ``type``
        This type represents the class that will be used to build the :class:`grid2op.Observation` visible to the
        :class:`grid2op.Agent`. As for :attr:`Runner.actionClass`, this should be a type, and **not** an instance
        (object) of this type. This type should derive from :class:`grid2op.Observation`. The default is
        :class:`grid2op.CompleteObservation`.

    rewardClass: ``type``
        Represents the type used to build the rewards that are given to the :class:`Agent`. As for
        :attr:`Runner.actionClass`, this should be a type, and **not** an instance (object) of this type.
        This type should derive from :class:`grid2op.Reward`. The default is :class:`grid2op.ConstantReward`, which
        **should not** be used to train or evaluate an agent, but rather serves debugging purposes.

    gridStateclass: ``type``
        This type controls the mechanisms used to read chronics and assign data to the powergrid. Like every
        "\.*Class" attribute, a type should be passed and not an instance (object) of this type. Its default is
        :class:`grid2op.GridStateFromFile` and it must be a subclass of :class:`grid2op.GridValue`.

    legalActClass: ``type``
        This type controls the mechanisms used to assess whether a :class:`grid2op.Action` is legal.
        Like every "\.*Class" attribute, a type should be passed and not an instance (object) of this type.
        Its default is :class:`grid2op.AllwaysLegal` and it must be a subclass of :class:`grid2op.LegalAction`.

    backendClass: ``type``
        This type controls the backend, *eg.* the software that computes the powerflows.
        Like every "\.*Class" attribute, a type should be passed and not an instance (object) of this type.
        Its default is :class:`grid2op.PandaPowerBackend` and it must be a subclass of :class:`grid2op.Backend`.

    agentClass: ``type``
        This type controls the type of Agent, *eg.* the bot / controller that will take :class:`grid2op.Action`
        and avoid cascading failures.
        Like every "\.*Class" attribute, a type should be passed and not an instance (object) of this type.
        Its default is :class:`grid2op.DoNothingAgent` and it must be a subclass of :class:`grid2op.Agent`.

    logger:
        An object that can be used to log information, either to a text file or by printing it to the command prompt.

    init_grid_path: ``str``
        This attribute stores the path where the powergrid data are located. If a relative path is given, it will be
        extended into an absolute path.

    names_chronics_to_backend: ``dict``
        See the description of :func:`grid2op.ChronicsHelper.initialize` for more information about this dictionary.

    parameters_path: ``str``, optional
        Where to look for the :class:`grid2op.Environment` :class:`grid2op.Parameters`. It defaults to ``None``, which
        corresponds to using default values.

    parameters: :class:`grid2op.Parameters`
        The parameters used. This is an instance (object) of type :class:`grid2op.Parameters` initialized from
        :attr:`Runner.parameters_path`.

    path_chron: ``str``
        Path indicating where to look for temporal data.

    chronics_handler: :class:`grid2op.ChronicsHandler`
        Initialized from :attr:`Runner.gridStateclass` and :attr:`Runner.path_chron` it represents the input data used
        to generate grid state by the :attr:`Runner.env`

    backend: :class:`grid2op.Backend`
        Used to compute the powerflow. This object has the type given by :attr:`Runner.backendClass`

    env: :class:`grid2op.Environment`
        Represents the environment that the agent / bot / controller must control through actions. It is initialized
        from the :attr:`Runner.envClass`.

    agent: :class:`grid2op.Agent`
        Represents the agent / bot / controller that takes the actions performed on an environment (the powergrid) to
        maximize a certain reward.

    verbose: ``bool``
        If ``True``, then detailed output is written for each step.

    gridStateclass_kwargs: ``dict``
        Additional keyword arguments used to build the :attr:`Runner.chronics_handler`

    thermal_limit_a: ``numpy.ndarray``
        The thermal limit for the environment (if any).
    """

    def __init__(self,
                 # full path where grid state is located, eg "./data/test_Pandapower/case14.json"
                 init_grid_path: str,
                 path_chron,  # path where chronics of injections are stored
                 parameters_path=None,
                 names_chronics_to_backend=None,
                 actionClass=TopologyAction,
                 observationClass=CompleteObservation,
                 rewardClass=FlatReward,
                 legalActClass=AllwaysLegal,
                 envClass=Environment,
                 gridStateclass=GridStateFromFile,
                 # type of chronics to use. For example GridStateFromFile if forecasts are not used, or GridStateFromFileWithForecasts otherwise
                 backendClass=PandaPowerBackend,
                 agentClass=DoNothingAgent,  # class used to build the agent
                 agentInstance=None,
                 verbose=False,
                 gridStateclass_kwargs={},
                 thermal_limit_a=None
                 ):
        """
        Initialize the Runner.

        Parameters
        ----------
        init_grid_path: ``str``
            Mandatory, used to initialize :attr:`Runner.init_grid_path`.

        path_chron: ``str``
            Mandatory, indicates where to look for chronics data; used to initialize :attr:`Runner.path_chron`.

        parameters_path: ``str`` or ``dict``, optional
            Used to initialize :attr:`Runner.parameters_path`. If it's a string, this will suppose parameters are
            located at this path, if it's a dictionary, this will use the parameters converted from this dictionary.

        names_chronics_to_backend: ``dict``, optional
            Used to initialize :attr:`Runner.names_chronics_to_backend`.

        actionClass: ``type``, optional
            Used to initialize :attr:`Runner.actionClass`.

        observationClass: ``type``, optional
            Used to initialize :attr:`Runner.observationClass`.

        rewardClass: ``type``, optional
            Used to initialize :attr:`Runner.rewardClass`. Defaults to :class:`grid2op.ConstantReward`, which
            **should not** be used to train or evaluate an agent, but rather serves debugging purposes.

        legalActClass: ``type``, optional
            Used to initialize :attr:`Runner.legalActClass`.

        envClass: ``type``, optional
            Used to initialize :attr:`Runner.envClass`.

        gridStateclass: ``type``, optional
            Used to initialize :attr:`Runner.gridStateclass`.

        backendClass: ``type``, optional
            Used to initialize :attr:`Runner.backendClass`.

        agentClass: ``type``, optional
            Used to initialize :attr:`Runner.agentClass`.

        agentInstance: :class:`grid2op.Agent.Agent`
            Used to initialize the agent. Note that only one of :attr:`agentClass` or :attr:`agentInstance` can be
            used at a time. If both of them are ``None``, or both of them are not ``None``, an error is thrown.

        verbose: ``bool``, optional
            Used to initialize :attr:`Runner.verbose`.

        thermal_limit_a: ``numpy.ndarray``
            The thermal limit for the environment (if any).

        """

        if not isinstance(envClass, type):
            raise Grid2OpException(
                "Parameter \"envClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(envClass)))
        if not issubclass(envClass, Environment):
            raise RuntimeError("Impossible to create a runner without an evnrionment derived from grid2op.Environement"
                               " class. Please modify \"envClass\" parameter.")
        self.envClass = envClass

        if not isinstance(actionClass, type):
            raise Grid2OpException(
                "Parameter \"actionClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(actionClass)))
        if not issubclass(actionClass, Action):
            raise RuntimeError("Impossible to create a runner without an action class derived from grid2op.Action. "
                               "Please modify \"actionClass\" parameter.")
        self.actionClass = actionClass

        if not isinstance(observationClass, type):
            raise Grid2OpException(
                "Parameter \"observationClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(observationClass)))
        if not issubclass(observationClass, Observation):
            raise RuntimeError("Impossible to create a runner without an observation class derived from "
                               "grid2op.Observation. Please modify \"observationClass\" parameter.")
        self.observationClass = observationClass

        if not isinstance(rewardClass, type):
            raise Grid2OpException(
                "Parameter \"rewardClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(rewardClass)))
    
        if not issubclass(rewardClass, Reward):
            raise RuntimeError("Impossible to create a runner without an observation class derived from "
                               "grid2op.Reward. Please modify \"rewardClass\" parameter.")
        self.rewardClass = rewardClass

        if not isinstance(gridStateclass, type):
            raise Grid2OpException(
                "Parameter \"gridStateclass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(gridStateclass)))
        if not issubclass(gridStateclass, GridValue):
            raise RuntimeError("Impossible to create a runner without an chronics class derived from "
                               "grid2op.GridValue. Please modify \"gridStateclass\" parameter.")
        self.gridStateclass = gridStateclass

        if not isinstance(legalActClass, type):
            raise Grid2OpException(
                "Parameter \"legalActClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(legalActClass)))
        if not issubclass(legalActClass, LegalAction):

            raise RuntimeError("Impossible to create a runner without a class defining legal actions derived "
                               "from grid2op.LegalAction. Please modify \"legalActClass\" parameter.")
        self.legalActClass = legalActClass

        if not isinstance(backendClass, type):
            raise Grid2OpException(
                "Parameter \"backendClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(backendClass)))
        if not issubclass(backendClass, Backend):
            raise RuntimeError("Impossible to create a runner without a backend class derived from grid2op.Backend. "
                               "Please modify the \"backendClass\" parameter.")
        self.backendClass = backendClass

        if agentClass is not None:
            if agentInstance is not None:
                raise RuntimeError("Impossible to build the backend. Only one of AgentClass or agentInstance can be "
                                   "used (both are not None).")
            if not isinstance(agentClass, type):
                raise Grid2OpException(
                    "Parameter \"agentClass\" used to build the Runner should be a type (a class) and not an object "
                    "(an instance of a class). It is currently \"{}\"".format(
                        type(agentClass)))
            if not issubclass(agentClass, Agent):
                raise RuntimeError("Impossible to create a runner without an agent class derived from grid2op.Agent. "
                                   "Please modify \"agentClass\" parameter.")
            self.agentClass = agentClass
            self._useclass = True
            self.agent = None
        elif agentInstance is not None:
            if not isinstance(agentInstance, Agent):
                raise RuntimeError("Impossible to create a runner without an agent class derived from grid2op.Agent. "
                                   "Please modify \"agentInstance\" parameter.")
            self.agentClass = None
            self._useclass = False
            self.agent = agentInstance
        else:
            raise RuntimeError("Impossible to build the backend. Either AgentClass or agentInstance must be provided "
                               "and both are None.")

        self.logger = ConsoleLog(
            DoNothingLog.INFO if verbose else DoNothingLog.ERROR)

        # store _parameters
        self.init_grid_path = init_grid_path
        self.names_chronics_to_backend = names_chronics_to_backend

        # game _parameters
        if isinstance(parameters_path, str):
            self.parameters_path = parameters_path
            self.parameters = Parameters(parameters_path)
        elif isinstance(parameters_path, dict):
            # keep the attribute consistent with the other branches
            self.parameters_path = parameters_path
            self.parameters = Parameters()
            self.parameters.init_from_dict(parameters_path)
        elif parameters_path is None:
            self.parameters_path = parameters_path
            self.parameters = Parameters()
        else:
            raise RuntimeError("Impossible to build the parameters. The argument \"parameters_path\" should either"
                               "be a string or a dictionary.")

        # chronics of grid state
        self.path_chron = path_chron
        self.gridStateclass_kwargs = gridStateclass_kwargs
        self.chronics_handler = ChronicsHandler(chronicsClass=self.gridStateclass,
                                                path=self.path_chron,
                                                **self.gridStateclass_kwargs)

        # the backend, used to compute powerflows
        self.backend = self.backendClass()

        # build the environment
        self.env = None

        self.verbose = verbose

        self.thermal_limit_a = thermal_limit_a

    def _new_env(self, chronics_handler, backend, parameters):
        res = self.envClass(init_grid_path=self.init_grid_path,
                            chronics_handler=chronics_handler,
                            backend=backend,
                            parameters=parameters,
                            names_chronics_to_backend=self.names_chronics_to_backend,
                            actionClass=self.actionClass,
                            observationClass=self.observationClass,
                            rewardClass=self.rewardClass,
                            legalActClass=self.legalActClass)

        if self.thermal_limit_a is not None:
            res.set_thermal_limit(self.thermal_limit_a)

        if self._useclass:
            agent = self.agentClass(res.helper_action_player)
        else:
            agent = self.agent
        return res, agent

    def init_env(self):
        """
        Function used to initialize the environment and the agent.
        It is called by :func:`Runner.reset`.

        Returns
        -------
        ``None``

        """
        self.env, self.agent = self._new_env(
            self.chronics_handler, self.backend, self.parameters)

    def reset(self):
        """
        Used to reset an environment. This method is called at the beginning of each new episode.
        If the environment is not initialized, then it is initialized with :func:`Runner.init_env`.

        Returns
        -------
        ``None``

        """
        if self.env is None:
            self.init_env()
        else:
            self.env.reset()

    def run_one_episode(self, indx=0, path_save=None):
        """
        Function used to run one episode of the :attr:`Runner.agent` and see how it performs in the :attr:`Runner.env`.

        Parameters
        ----------
        indx: ``int``
            The number of episodes previously run (used to select the chronics of this episode).

        path_save: ``str``, optional
            Path where to save the data. See the description of :mod:`grid2op.Runner` for the structure of the saved
            file.

        Returns
        -------
        cum_reward: ``float``
            The cumulative reward obtained by the agent during this episode

        time_step: ``int``
            The number of timesteps that have been played before the end of the episode (because of a "game over" or
            because there were no more data)

        """
        self.reset()
        res = self._run_one_episode(self.env, self.agent, self.logger, indx, path_save)
        return res

    @staticmethod
    def _run_one_episode(env, agent, logger, indx, path_save=None):
        done = False
        time_step = int(0)
        dict_ = {}
        time_act = 0.
        cum_reward = 0.

        # reset the environment
        env.chronics_handler.tell_id(indx-1)
        # the "-1" above is because the environment will be reset. So it will increase id of 1.
        obs = env.reset()

        # compute the size and everything if it needs to be stored
        nb_timestep_max = env.chronics_handler.max_timestep()
        efficient_storing = nb_timestep_max > 0
        nb_timestep_max = max(nb_timestep_max, 0)

        if path_save is None:
            # nothing is stored on disk, so nothing needs to be kept in memory
            nb_timestep_max = 0

        times = np.full(nb_timestep_max, fill_value=np.NaN, dtype=np.float)
        rewards = np.full(nb_timestep_max, fill_value=np.NaN, dtype=np.float)
        actions = np.full((nb_timestep_max, env.action_space.n),
                          fill_value=np.NaN, dtype=np.float)
        env_actions = np.full(
            (nb_timestep_max, env.helper_action_env.n), fill_value=np.NaN, dtype=np.float)
        observations = np.full(
            (nb_timestep_max+1, env.observation_space.n), fill_value=np.NaN, dtype=np.float)
        disc_lines = np.full(
            (nb_timestep_max, env.backend.n_line), fill_value=np.NaN, dtype=np.bool)
        disc_lines_templ = np.full(
            (1, env.backend.n_line), fill_value=False, dtype=np.bool)

        if path_save is not None:
            # store observation at timestep 0
            if efficient_storing:
                observations[time_step, :] = obs.to_vect()
            else:
                observations = np.concatenate((observations, obs.to_vect()))

        episode = EpisodeData(actions=actions, env_actions=env_actions,
                          observations=observations,
                          rewards=rewards, disc_lines=disc_lines, times=times,
                          observation_space=env.observation_space,
                          action_space=env.action_space,
                          helper_action_env=env.helper_action_env,
                          path_save=path_save, disc_lines_templ=disc_lines_templ,
                          logger=logger, name=env.chronics_handler.get_name())

        episode.set_parameters(env)

        beg_ = time.time()

        reward = env.reward_range[0]
        done = False

        while not done:
            beg__ = time.time()
            act = agent.act(obs, reward, done)
            end__ = time.time()
            time_act += end__ - beg__

            obs, reward, done, info = env.step(act)  # should load the first time stamp
            cum_reward += reward
            time_step += 1

            episode.incr_store(efficient_storing, time_step, end__ - beg__,
                               reward, env.env_modification, act, obs, info)
        end_ = time.time()

        episode.set_meta(env, time_step, cum_reward)

        li_text = ["Env: {:.2f}s", "\t - apply act {:.2f}s", "\t - run pf: {:.2f}s",
                   "\t - env update + observation: {:.2f}s", "Agent: {:.2f}s", "Total time: {:.2f}s",
                   "Cumulative reward: {:1f}"]
        msg_ = "\n".join(li_text)
        logger.info(msg_.format(
            env._time_apply_act + env._time_powerflow + env._time_extract_obs,
            env._time_apply_act, env._time_powerflow, env._time_extract_obs,
            time_act, end_ - beg_, cum_reward))

        episode.set_episode_times(env, time_act, beg_, end_)

        episode.to_disk()

        name_chron = env.chronics_handler.get_name()

        return name_chron, cum_reward, int(time_step)

    def run_sequential(self, nb_episode, path_save=None):
        """
        This method is called to see how well an agent performs on a sequence of episodes.

        Parameters
        ----------
        nb_episode: ``int``
            Number of episode to play.

        path_save: ``str``, optional
            If not None, it specifies where to store the data. See the description of this module :mod:`Runner` for
            more information

        Returns
        -------
        res: ``list``
            List of tuples. Each tuple has 5 elements:

              - "id_chron" unique identifier of the episode
              - "name_chron" name of chronics
              - "cum_reward" the cumulative reward obtained by the :attr:`Runner.Agent` on this episode i
              - "nb_time_step": the number of time steps played in this episode.
              - "max_ts" : the maximum number of time steps of the chronics

        """
        res = [(None, None, None, None, None) for _ in range(nb_episode)]
        for i in range(nb_episode):
            name_chron, cum_reward, nb_time_step = self.run_one_episode(path_save=path_save, indx=i)
            id_chron = self.chronics_handler.get_id()
            max_ts = self.chronics_handler.max_timestep()
            res[i] = (id_chron, name_chron, cum_reward, nb_time_step, max_ts)
        return res

    @staticmethod
    def _one_process_parrallel(runner, episode_this_process, process_id, path_save=None):
        chronics_handler = ChronicsHandler(chronicsClass=runner.gridStateclass,
                                           path=runner.path_chron,
                                           **runner.gridStateclass_kwargs)
        parameters = copy.deepcopy(runner.parameters)
        backend = runner.backendClass()
        nb_episode_this_process = len(episode_this_process)
        res = [(None, None, None) for _ in range(nb_episode_this_process)]
        for i, p_id in enumerate(episode_this_process):
            env, agent = runner._new_env(chronics_handler=chronics_handler,
                                         backend=backend,
                                         parameters=parameters)
            name_chron, cum_reward, nb_time_step = Runner._run_one_episode(
                env, agent, runner.logger, p_id, path_save)
            id_chron = chronics_handler.get_id()
            max_ts = chronics_handler.max_timestep()
            res[i] = (id_chron, name_chron, cum_reward, nb_time_step, max_ts)
        return res

    def run_parrallel(self, nb_episode, nb_process=1, path_save=None):
        """
        This method runs the nb_episode episodes in parallel and independently, spread over nb_process processes.

        Note that it completely restarts the :attr:`Runner.backend` and :attr:`Runner.env` if the computation
        is actually performed with more than 1 core (nb_process > 1).

        It uses python multiprocessing, and especially the :class:`multiprocess.Pool`, to perform the computations.
        This implies that all runs are completely independent (they happen in different processes) and that the
        memory consumption can be large. Testing is recommended if the amount of available RAM is low.

        It has the same return type as the :func:`Runner.run_sequential`.

        Parameters
        ----------
        nb_episode: ``int``
            Number of episode to simulate

        nb_process: ``int``, optional
            Number of process used to play the nb_episode. Default to 1.

        path_save: ``str``, optional
            If not None, it specifies where to store the data. See the description of this module :mod:`Runner` for
            more information

        Returns
        -------
        res: ``list``
            List of tuples. Each tuple has 4 elements:

              - "i" unique identifier of the episode (compared to :func:`Runner.run_sequential`, the elements of the
                returned list are not necessarily sorted by this value)
              - "cum_reward" the cumulative reward obtained by the :attr:`Runner.Agent` on this episode i
              - "nb_time_step": the number of time steps played in this episode.
              - "max_ts" : the maximum number of time steps of the chronics

        """
        if nb_process <= 0:
            raise RuntimeError(
                "Runner: you need at least 1 process to run episodes")
        if nb_process == 1:
            warnings.warn(
                "Runner.run_parrallel: number of process set to 1. Failing back into sequential mod.")
            return [self.run_sequential(nb_episode, path_save=path_save)]
        else:
            if self.env is not None:
                self.env.close()
                self.env = None
            self.backend = self.backendClass()

            nb_process = int(nb_process)
            process_ids = [[] for i in range(nb_process)]
            for i in range(nb_episode):
                process_ids[i % nb_process].append(i)

            res = []
            with Pool(nb_process) as p:
                tmp = p.starmap(Runner._one_process_parrallel,
                                [(self, pn, i, path_save) for i, pn in enumerate(process_ids)])
            for el in tmp:
                res += el
        return res

    def run(self, nb_episode, nb_process=1, path_save=None):
        """
        Main method of the :class:`Runner` class. It will either call :func:`Runner.run_sequential` if "nb_process" is
        1 or :func:`Runner.run_parrallel` if nb_process >= 2.

        Parameters
        ----------
        nb_episode: ``int``
            Number of episode to simulate

        nb_process: ``int``, optional
            Number of process used to play the nb_episode. Default to 1.

        path_save: ``str``, optional
            If not None, it specifies where to store the data. See the description of this module :mod:`Runner` for
            more information

        Returns
        -------
        res: ``list``
            List of tuples. Each tuple has 3 elements:

              - "i" unique identifier of the episode (compared to :func:`Runner.run_sequential`, the elements of the
                returned list are not necessarily sorted by this value)
              - "cum_reward" the cumulative reward obtained by the :attr:`Runner.Agent` on this episode i
              - "nb_time_step": the number of time steps played in this episode.

        """
        if nb_episode < 0:
            raise RuntimeError(
                "Impossible to run a negative number of scenarios.")
        if nb_episode == 0:
            res = []
        else:
            if nb_process <= 0:
                raise RuntimeError(
                    "Impossible to run using less than 1 process.")
            if nb_process == 1:
                self.logger.info("Sequential runner used.")
                res = self.run_sequential(nb_episode, path_save=path_save)
            else:
                self.logger.info("Parrallel runner used.")
                res = self.run_parrallel(
                    nb_episode, nb_process=nb_process, path_save=path_save)
        return res
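
For reference, a minimal usage sketch (not part of the example above) assembled from the constructor and run() docstrings shown here. The grid path and chronics path are placeholder values, the module paths in the imports are assumptions about the package layout, and DoNothingAgent is simply the documented default agent class.

from grid2op.Runner import Runner          # assumed module layout
from grid2op.Agent import DoNothingAgent   # assumed module layout

# placeholder paths: point these at a real grid description and chronics folder
runner = Runner(init_grid_path="./data/test_Pandapower/case14.json",
                path_chron="./data/chronics",
                agentClass=DoNothingAgent)

# play two episodes sequentially and store the episode data under ./runner_logs
res = runner.run(nb_episode=2, nb_process=1, path_save="./runner_logs")
for id_chron, name_chron, cum_reward, nb_time_step, max_ts in res:
    print(name_chron, cum_reward, nb_time_step, max_ts)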
Example no. 16
    print("Main_Both2")
    print('Data loading...')
    t1,t_init = time(),time()
    args.device = device
    args.date_time = datetime.datetime.now()
    print(args.date_time)

    if args.method.lower() in ['tenet']:
        if args.knn_graph == 'True':
            dataset = EmbedDataset(args) ##change according to method
        else:
            dataset = TenetDataset(args) ##change according to method
    else:
        dataset = Dataset(args)

    params = Parameters(args,dataset)
    print("""Load data done [%.1f s]. #user:%d, #list:%d, #item:%d, #train:%d, #valid:%d, #test:%d"""% (time() - t1, params.num_user, params.num_list,
        params.num_item,params.num_train_instances,params.num_valid_instances,params.num_test_instances))

    args.args_str        = params.get_args_to_string()
    t1                   = time()
    print("args str: ",args.args_str)

    print("leng from list_items_list: ",len(utils.get_value_lists_as_list(params.list_items_dct)))
    print("leng from trainArrTriplets: ", len((params.trainArrTriplets[0])))
    print("non-zero entries in train_matrix: ", params.train_matrix.nnz)

    # model-loss-optimizer defn =======================================================================
    models               = Models(params,device=device)
    model                = models.get_model()
Example no. 17
    def setUp(self):
        """
        The case file is a representation of the case14 as found in the ieee14 powergrid.
        :return:
        """
        self.tolvect = 1e-2
        self.tol_one = 1e-5
        self.game_rules = GameRules()
        # pdb.set_trace()
        self.rewardClass = L2RPNReward
        self.reward_helper = self.rewardClass()
        self.obsClass = CompleteObservation
        self.parameters = Parameters()

        # powergrid
        self.backend = PandaPowerBackend()
        self.path_matpower = PATH_DATA_TEST_PP
        self.case_file = "test_case14.json"

        # chronics
        self.path_chron = os.path.join(PATH_CHRONICS, "chronics_with_forecast")
        self.chronics_handler = ChronicsHandler(
            chronicsClass=GridStateFromFileWithForecasts, path=self.path_chron)

        self.tolvect = 1e-2
        self.tol_one = 1e-5
        self.id_chron_to_back_load = np.array(
            [0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9])

        # force the verbose backend
        self.backend.detailed_infos_for_cascading_failures = True

        self.names_chronics_to_backend = {
            "loads": {
                "2_C-10.61": 'load_1_0',
                "3_C151.15": 'load_2_1',
                "14_C63.6": 'load_13_2',
                "4_C-9.47": 'load_3_3',
                "5_C201.84": 'load_4_4',
                "6_C-6.27": 'load_5_5',
                "9_C130.49": 'load_8_6',
                "10_C228.66": 'load_9_7',
                "11_C-138.89": 'load_10_8',
                "12_C-27.88": 'load_11_9',
                "13_C-13.33": 'load_12_10'
            },
            "lines": {
                '1_2_1': '0_1_0',
                '1_5_2': '0_4_1',
                '9_10_16': '8_9_2',
                '9_14_17': '8_13_3',
                '10_11_18': '9_10_4',
                '12_13_19': '11_12_5',
                '13_14_20': '12_13_6',
                '2_3_3': '1_2_7',
                '2_4_4': '1_3_8',
                '2_5_5': '1_4_9',
                '3_4_6': '2_3_10',
                '4_5_7': '3_4_11',
                '6_11_11': '5_10_12',
                '6_12_12': '5_11_13',
                '6_13_13': '5_12_14',
                '4_7_8': '3_6_15',
                '4_9_9': '3_8_16',
                '5_6_10': '4_5_17',
                '7_8_14': '6_7_18',
                '7_9_15': '6_8_19'
            },
            "prods": {
                "1_G137.1": 'gen_0_4',
                "3_G36.31": "gen_2_1",
                "6_G63.29": "gen_5_2",
                "2_G-56.47": "gen_1_0",
                "8_G40.43": "gen_7_3"
            },
        }

        # _parameters for the environment
        self.env_params = Parameters()

        self.env = Environment(
            init_grid_path=os.path.join(self.path_matpower, self.case_file),
            backend=self.backend,
            chronics_handler=self.chronics_handler,
            parameters=self.env_params,
            names_chronics_to_backend=self.names_chronics_to_backend,
            rewardClass=self.rewardClass)

        self.dict_ = {
            'name_gen':
            ['gen_1_0', 'gen_2_1', 'gen_5_2', 'gen_7_3', 'gen_0_4'],
            'name_load': [
                'load_1_0', 'load_2_1', 'load_13_2', 'load_3_3', 'load_4_4',
                'load_5_5', 'load_8_6', 'load_9_7', 'load_10_8', 'load_11_9',
                'load_12_10'
            ],
            'name_line': [
                '0_1_0', '0_4_1', '8_9_2', '8_13_3', '9_10_4', '11_12_5',
                '12_13_6', '1_2_7', '1_3_8', '1_4_9', '2_3_10', '3_4_11',
                '5_10_12', '5_11_13', '5_12_14', '3_6_15', '3_8_16', '4_5_17',
                '6_7_18', '6_8_19'
            ],
            'name_sub': [
                'sub_0', 'sub_1', 'sub_10', 'sub_11', 'sub_12', 'sub_13',
                'sub_2', 'sub_3', 'sub_4', 'sub_5', 'sub_6', 'sub_7', 'sub_8',
                'sub_9'
            ],
            'sub_info': [3, 6, 4, 6, 5, 6, 3, 2, 5, 3, 3, 3, 4, 3],
            'load_to_subid': [1, 2, 13, 3, 4, 5, 8, 9, 10, 11, 12],
            'gen_to_subid': [1, 2, 5, 7, 0],
            'line_or_to_subid':
            [0, 0, 8, 8, 9, 11, 12, 1, 1, 1, 2, 3, 5, 5, 5, 3, 3, 4, 6, 6],
            'line_ex_to_subid': [
                1, 4, 9, 13, 10, 12, 13, 2, 3, 4, 3, 4, 10, 11, 12, 6, 8, 5, 7,
                8
            ],
            'load_to_sub_pos': [5, 3, 2, 5, 4, 5, 4, 2, 2, 2, 3],
            'gen_to_sub_pos': [4, 2, 4, 1, 2],
            'line_or_to_sub_pos':
            [0, 1, 0, 1, 1, 0, 1, 1, 2, 3, 1, 2, 0, 1, 2, 3, 4, 3, 1, 2],
            'line_ex_to_sub_pos':
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 2, 1, 1, 2, 0, 2, 3, 0, 3],
            'load_pos_topo_vect': [8, 12, 55, 18, 23, 29, 39, 42, 45, 48, 52],
            'gen_pos_topo_vect': [7, 11, 28, 34, 2],
            'line_or_pos_topo_vect': [
                0, 1, 35, 36, 41, 46, 50, 4, 5, 6, 10, 15, 24, 25, 26, 16, 17,
                22, 31, 32
            ],
            'line_ex_pos_topo_vect': [
                3, 19, 40, 53, 43, 49, 54, 9, 13, 20, 14, 21, 44, 47, 51, 30,
                37, 27, 33, 38
            ],
            'gen_type': ['solar', 'nuclear', 'nuclear', 'nuclear', 'thermal'],
            'gen_pmin': [0.0, 0.0, 0.0, 0.0, 0.0],
            'gen_pmax': [50.0, 80.0, 120.0, 120.0, 190.0],
            'gen_redispatchable': [False, True, True, True, True],
            'gen_max_ramp_up': [50.0, 80.0, 120.0, 120.0, 190.0],
            'gen_max_ramp_down': [50.0, 80.0, 120.0, 120.0, 190.0],
            'gen_min_uptime': [0, 0, 0, 0, 0],
            'gen_min_downtime': [0, 1, 0, 0, 0],
            'gen_cost_per_MW': [0.0, 0.0, 0.0, 0.0, 10.0],
            'gen_startup_cost': [0.0, 0.0, 0.0, 0.0, 0.0],
            'gen_shutdown_cost': [0.0, 0.0, 0.0, 0.0, 0.0],
            'subtype':
            'Observation.CompleteObservation'
        }

        self.dtypes = np.array([
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('float64'),
            dtype('bool'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('int64'),
            dtype('float64'),
            dtype('float64')
        ],
                               dtype=object)
        self.shapes = np.array([
            1, 1, 1, 1, 1, 1, 5, 5, 5, 11, 11, 11, 20, 20, 20, 20, 20, 20, 20,
            20, 20, 20, 20, 56, 20, 20, 14, 20, 20, 20, 5, 5
        ])
        self.size_obs = 454
Exemplo n.º 18
0
)
parser.add_argument(
    '--category_names',
    type=str,
    default='cat_to_name.json',
    help='file for flower name dictionary | (default = cat_to_name.json)')
parser.add_argument('--gpu',
                    type=str,
                    default='cuda',
                    help='cuda or cpu | (default = cuda)')

arg_in = parser.parse_args()
parameters = Parameters({
    "imagefilepath": arg_in.imagefilepath,
    "checkpoint": arg_in.checkpoint,
    "top_k": arg_in.top_k,
    "category_names": arg_in.category_names,
    "gpu": arg_in.gpu
})

# Function that greets the user
greetUser()
# Display the command-line arguments collected into the Parameters object
parameters.displayParameters('predict')

# Ask for image filepath to make predictions
if not parameters.in_arg['imagefilepath']:
    parameters.in_arg['imagefilepath'] = command(
        "Enter the image path, to predict its flower class \n e.g. flowers/test/101/image_07949.jpg \n Your input:"
    )
print('Do you want to modify input values?')
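For reference, the same Parameters object can be built without going through argparse. A minimal sketch, where the checkpoint filename and the top_k value are assumptions (only the keys and the example image path come from the snippet above):

parameters = Parameters({
    "imagefilepath": "flowers/test/101/image_07949.jpg",  # example path reused from the prompt above
    "checkpoint": "checkpoint.pth",                        # hypothetical checkpoint file
    "top_k": 5,                                            # hypothetical value
    "category_names": "cat_to_name.json",
    "gpu": "cuda",
})
parameters.displayParameters('predict')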
import importlib
import os
import sys
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix

from utils.Dataset_hdf5 import DatasetHDF5
from Parameters import Parameters

# ===============get basic folder=====================
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
para = Parameters(evaluation=True)
if para.gpu:
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
else:
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# set parameters
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(para.decay_step)
BN_DECAY_CLIP = 0.99

MODEL = importlib.import_module(para.model)  # import network module
LOG_MODEL = para.logmodelDir
EVAL = para.evallog
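The BN_* constants above typically drive an exponential batch-norm momentum schedule. A minimal, framework-agnostic sketch of such a schedule, assuming a staircase decay (the helper name and the batch size are not part of the snippet):

def get_bn_decay(step, batch_size=32):
    # exponential decay of the initial momentum, clipped at BN_DECAY_CLIP
    bn_momentum = BN_INIT_DECAY * (BN_DECAY_DECAY_RATE ** ((step * batch_size) // BN_DECAY_DECAY_STEP))
    return min(BN_DECAY_CLIP, 1 - bn_momentum)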
Exemplo n.º 20
0
def main(argv):
	parameters = Parameters(argv)
	hostname = parameters.getHostname()
	port = parameters.getPort()
	dbname = parameters.getDBName()
	language_1, language_2 = parameters.getLanguage()
	collection = parameters.getCollection()
	input_folder = parameters.getInputFolder()
	type_corpus = parameters.getType()

	print 'Using parameters of configuration: '
	print '- Host : ',hostname
	print '- Port : ',port
	print '- Coll : ',collection
	print '- DBase: ',dbname
	print '- Input: ',input_folder

	database = Mongo(hostname, dbname, collection)	
	
	try:
		root, dirs, files = os.walk(input_folder).next()[:3]
	except IOError:
		print 'ERROR: It was not possible to open the '+input_folder+' folder'
		sys.exit(2)
		
	name_folder = (input_folder.split('/'))[-2]
	dic_files = {}
	for corpus_file in files:
		print 'Working on file: '+corpus_file
		if not re.search('~$', corpus_file):  # skip backup files ending in '~'
			id_file = corpus_file[0:-7]
			language = corpus_file[-6:-4]
			if not dic_files.has_key(id_file):
				dic_files[id_file] = {'language_1': language}
			else:
				dic_files[id_file]['language_2'] = language

	counter = 1
	for filename in dic_files:
		language_1 = dic_files[filename]['language_1']
		language_2 = dic_files[filename]['language_2']
		id_file_1 = name_folder+'_'+filename+'_'+language_1
		id_file_2 = name_folder+'_'+filename+'_'+language_2

		try:
			file_1 = codecs.open(input_folder+''+filename+'_'+language_1+'.snt', 'r', 'utf-8')
		except IOError:
			print 'ERROR: System cannot open the '+input_folder+''+filename+'_'+language_1+'.snt file'
			sys.exit(2)
		try:
			file_2 = codecs.open(input_folder+''+filename+'_'+language_2+'.snt', 'r', 'utf-8')
		except IOError:
			print 'ERROR: System cannot open the '+input_folder+''+filename+'_'+language_2+'.snt file'
			sys.exit(2)
		
		content_1 = ''
		for line in file_1:
			#if line.strip():
			content_1 += line

		content_2 = ''
		for line in file_2:
			#if line.strip():
			content_2 += line

		if database.exists(language_1, id_file_1):
			if not database.exists(language_2, id_file_2):
				database.insertInExisting(language_1, id_file_1, language_2, id_file_2, content_2)
		else:
			if database.exists(language_2, id_file_2):
				database.insertInExisting(language_2, id_file_2, language_1, id_file_1, content_1)
			else:
				database.insertNewData(language_1, id_file_1, content_1, language_2, id_file_2, content_2, type_corpus, counter)
		counter += 1
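A worked example of the filename slicing used above, with a hypothetical file pair 'news01_en.snt' / 'news01_pt.snt' (both map to the same id_file and fill language_1 / language_2):

corpus_file = 'news01_en.snt'   # hypothetical filename
id_file = corpus_file[0:-7]     # 'news01'  (drops '_en.snt')
language = corpus_file[-6:-4]   # 'en'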
Exemplo n.º 21
0
class Network:
    dirs = []
    listNames = []
    parameters = Parameters()

    @classmethod
    def start(cls):
        cls.dirs = os.listdir(
            os.path.abspath(os.getcwd()) + "/" + cls.parameters.folder)
        X, Y = cls.get_data()

        print(len(Y), len(X))
        model = cls.creatModel(X)
        print(K.eval(model.optimizer.lr))
        history = model.fit([X[:, 0], X[:, 1]],
                            Y,
                            batch_size=64,
                            epochs=cls.parameters.num_epochs,
                            verbose=2,
                            validation_split=.25)
        print(K.eval(model.optimizer.lr))
        cls.graph(history)

        return model

    @classmethod
    def test(cls, img, model):
        print("Тест...")
        cls.dirs = os.listdir(
            os.path.abspath(os.getcwd()) + "/" + cls.parameters.folder)
        matrImage = cls.readDataTest()

        masImg = np.zeros([1, img.shape[0], img.shape[1], 1])
        masImg[0, :, :, 0] = img
        masImg = masImg / 255
        list = []

        # recognize the image
        i = 0
        while i < cls.parameters.numMan * cls.parameters.sample:
            #img = matrImage[i] * 255
            #cv2.imwrite("image/" + str(i) + ".jpg", img[0,:,:,:])
            res = model.predict([masImg, matrImage[i]])
            list.append(res)
            #print(res, str(i))
            i += 1

        minZn = min(list)
        if minZn < 0.4:
            p = list.index(minZn)
            #print(minZn)
            p += 1
            print(cls.dirs[math.ceil(p / cls.parameters.sample) - 1])
        else:
            print("Изображение не идентифицировано.")

    @classmethod
    def creatModel(cls, mas):
        img_a = Input(shape=mas.shape[2:])
        img_b = Input(shape=mas.shape[2:])
        base_network = cls.build_base_network(mas.shape[2:])
        # Get the feature vectors
        feat_vecs_a = base_network(img_a)
        feat_vecs_b = base_network(img_b)
        distance = Lambda(cls.euclidean_distance)([feat_vecs_a, feat_vecs_b])
        rms = RMSprop(learning_rate=cls.parameters.learning_rate)
        model = Model([img_a, img_b], distance)

        model.compile(loss=cls.contrastive_loss,
                      optimizer=rms,
                      metrics=[cls.accuracy])
        #opt = tensorflow.keras.optimizers.Adam(lr=0.001)
        #model.compile(optimizer=opt, loss='contrastive_crossentropy', metrics=['accuracy'])

        return model

    @classmethod
    def get_data(cls):
        total_sample_size = cls.parameters.total_sample_size
        width = cls.parameters.width
        height = cls.parameters.height
        sample = cls.parameters.sample
        numMan = cls.parameters.numMan
        nameFolder = cls.parameters.folder
        img = cv2.imread(nameFolder + "\\" + cls.dirs[0] + "\\" + str(1) +
                         '.' + cls.parameters.typeel)
        img = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)

        image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        dim1 = image.shape[0]
        dim2 = image.shape[1]

        count = 0

        x_geuine_pair = np.zeros([total_sample_size, 2, dim1, dim2, 1])
        y_genuine = np.zeros([total_sample_size, 1])

        for i in range(numMan):
            print(cls.dirs[i])
            #for j in range(int((numMan * (numMan - 1)) / 2)):
            for j in range(int(total_sample_size / numMan)):
                ind1 = 0
                ind2 = 0
                # read images from same directory (genuine pair)
                while ind1 == ind2:
                    ind1 = np.random.randint(sample)
                    ind2 = np.random.randint(sample)

                # read the two images
                img1 = cv2.imread(nameFolder + "\\" + cls.dirs[i] + "\\" +
                                  str(ind1 + 1) + "." + cls.parameters.typeel)
                img2 = cv2.imread(nameFolder + "\\" + cls.dirs[i] + "\\" +
                                  str(ind2 + 1) + "." + cls.parameters.typeel)

                img1 = cv2.resize(img1, (width, height),
                                  interpolation=cv2.INTER_AREA)
                img2 = cv2.resize(img2, (width, height),
                                  interpolation=cv2.INTER_AREA)
                # to gray
                img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

                # store the images to the initialized numpy array
                x_geuine_pair[count, 0, :, :, 0] = img1
                x_geuine_pair[count, 1, :, :, 0] = img2

                # since both images come from the same directory, assign label 1 (genuine pair)
                y_genuine[count] = 1
                count += 1

        count = 0
        x_imposite_pair = np.zeros([total_sample_size, 2, dim1, dim2, 1])
        y_imposite = np.zeros([total_sample_size, 1])

        #for i in range(int((numMan * (numMan - 1)) / 2)):
        for i in range(int(total_sample_size / sample)):
            for j in range(sample):
                ind1 = 0
                ind2 = 0
                # read images from different directories (impostor pair)
                while ind1 == ind2:
                    ind1 = np.random.randint(numMan)
                    ind2 = np.random.randint(numMan)

                img1 = cv2.imread(nameFolder + "\\" + cls.dirs[ind1] + "\\" +
                                  str(j + 1) + "." + cls.parameters.typeel)
                img2 = cv2.imread(nameFolder + "\\" + cls.dirs[ind2] + "\\" +
                                  str(j + 1) + "." + cls.parameters.typeel)

                img1 = cv2.resize(img1, (width, height),
                                  interpolation=cv2.INTER_AREA)
                img2 = cv2.resize(img2, (width, height),
                                  interpolation=cv2.INTER_AREA)
                # to gray
                img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
                img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

                x_imposite_pair[count, 0, :, :, 0] = img1
                x_imposite_pair[count, 1, :, :, 0] = img2
                # since the two images come from different directories, assign label 0 (impostor pair)
                y_imposite[count] = 0
                count += 1

        # concatenate the genuine and impostor pairs to form the full dataset
        X = np.concatenate([x_geuine_pair, x_imposite_pair], axis=0) / 255
        Y = np.concatenate([y_genuine, y_imposite], axis=0)

        return X, Y

    @classmethod
    def build_base_network(cls, input_shape):
        """
        filters - number of output filters
        kernel_size - width and height of the 2D convolution kernel
        pool_size - size of the pooling window
        strides - stride of the sliding window
        activation - activation function
        """
        seq = Sequential()

        nb_filter = [6, 12]
        kernel_size = 3

        seq.add(
            Conv2D(filters=nb_filter[0],
                   kernel_size=(kernel_size, kernel_size),
                   padding='valid',
                   input_shape=input_shape,
                   activation='relu'))
        seq.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        seq.add(Dropout(0.25))

        seq.add(
            Conv2D(filters=nb_filter[1],
                   kernel_size=(kernel_size, kernel_size),
                   padding='valid',
                   activation='relu'))
        seq.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        seq.add(Dropout(0.25))

        seq.add(Flatten())
        seq.add(Dense(100, activation='softmax'))

        return seq

    @classmethod
    def continueTraining(cls, model):
        cls.dirs = os.listdir(
            os.path.abspath(os.getcwd()) + "/" + cls.parameters.folder)
        newX, newY = cls.get_data()
        history = model.fit([newX[:, 0], newX[:, 1]],
                            newY,
                            validation_split=.25,
                            batch_size=64,
                            epochs=cls.parameters.num_epochs,
                            verbose=2)
        cls.graph(history)

        return model

    @classmethod
    def euclidean_distance(cls, vects):
        x, y = vects
        sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
        return K.sqrt(K.maximum(sum_square, K.epsilon()))

    @classmethod
    def contrastive_loss(cls, y_true, y_pred):
        margin = 1
        square_pred = K.square(y_pred)
        margin_square = K.square(K.maximum(margin - y_pred, 0))
        return K.mean(y_true * square_pred + (1 - y_true) * margin_square)

    @classmethod
    def compute_accuracy(cls, predictions, labels):
        pred = labels.ravel() < 0.5
        return np.mean(pred == predictions)

    @classmethod
    def accuracy(cls, y_true, y_pred):
        return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))

    @classmethod
    def graph(cls, history):
        plt.subplot(2, 2, 1)
        plt.plot(history.history['accuracy'])
        plt.title('Train accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epochs')
        plt.legend(['Training'], loc='upper left')

        plt.subplot(2, 2, 2)
        plt.plot(history.history['loss'])
        plt.title('Train loss')
        plt.ylabel('Loss')
        plt.xlabel('Epochs')
        plt.legend(['Training'], loc='upper left')

        plt.subplot(2, 2, 3)
        plt.plot(history.history['val_accuracy'])
        plt.title('Val accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epochs')
        plt.legend(['Validation'], loc='upper left')

        plt.subplot(2, 2, 4)
        plt.plot(history.history['val_loss'])
        plt.title('Val loss')
        plt.ylabel('Loss')
        plt.xlabel('Epochs')
        plt.legend(['Validation'], loc='upper left')
        plt.show()

    @classmethod
    def modelSave(cls, model):
        #model.save_weights('Models/model_w.h5')
        model.save('Models/model_w.h5')

    @classmethod
    def modelLoad(cls, fl):
        mas = np.zeros([
            cls.parameters.total_sample_size * 2, 2, cls.parameters.height,
            cls.parameters.width, 1
        ])
        new_model = cls.creatModel(mas)
        new_model.load_weights(fl)
        return new_model

    @classmethod
    def readDataTest(cls):
        count = 0
        sample = cls.parameters.sample
        numMan = cls.parameters.numMan
        width = cls.parameters.width
        height = cls.parameters.height
        matrImage = np.zeros([sample * numMan, 1, height, width, 1])
        for i in range(0, numMan):
            for j in range(0, sample):
                img = cv2.imread(cls.parameters.folder + "\\" + cls.dirs[i] +
                                 "\\" + str(j + 1) + '.' +
                                 cls.parameters.typeel)
                img = cv2.resize(img, (width, height),
                                 interpolation=cv2.INTER_AREA)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                matrImage[count, 0, :, :, 0] = img
                count += 1
        matrImage = matrImage / 255
        return matrImage
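A minimal end-to-end sketch of how this class is meant to be driven; the probe image path is hypothetical, and cv2 plus the folder layout expected by Parameters are assumed to be available as in the class above:

model = Network.start()                        # build the siamese model and train it on the folder data
Network.modelSave(model)                       # persist weights to Models/model_w.h5
model = Network.modelLoad('Models/model_w.h5')

probe = cv2.imread('probe.jpg')                # hypothetical probe image
probe = cv2.resize(probe, (Network.parameters.width, Network.parameters.height),
                   interpolation=cv2.INTER_AREA)
probe = cv2.cvtColor(probe, cv2.COLOR_BGR2GRAY)
Network.test(probe, model)                     # prints the name of the best-matching directory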
import jieba
from Parameters import Parameters
import numpy as np
from random import randint
from random import shuffle
import gensim
from gensim.models import KeyedVectors
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from gensim.models import word2vec

pm = Parameters()

# model = KeyedVectors.load_word2vec_format('model/sgns.weibo.word')
model = gensim.models.Word2Vec.load('model/Word60.model')
'''Get the stop-word list'''


def makeStopWord():
    with open(pm.stop_word, 'r', encoding='utf-8') as f:
        lines = f.read()
        words = jieba.lcut(lines, cut_all=False)
        stopWord = []
        for word in words:
            stopWord.append(word)
    return stopWord


'''Convert all words to vectors'''
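The function announced by this docstring is cut off in the listing. A minimal sketch of what a word-to-vector conversion could look like with the model loaded above; the function name, the vector size of 60 and the .wv accessor are assumptions:

def words2Array(words, stop_words, dim=60):
    # hypothetical helper: look up each non-stop word in the word2vec model, zeros for unknown words
    vectors = []
    for word in words:
        if word in stop_words:
            continue
        try:
            vectors.append(model.wv[word])
        except KeyError:
            vectors.append(np.zeros(dim))
    return np.array(vectors)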
Exemplo n.º 23
0
def parameters():
    from Parameters import Parameters
    return Parameters()
Exemplo n.º 24
0
    def __init__(self,
                 # full path where grid state is located, eg "./data/test_Pandapower/case14.json"
                 init_grid_path: str,
                 path_chron,  # path where chronics of injections are stored
                 parameters_path=None,
                 names_chronics_to_backend=None,
                 actionClass=TopologyAction,
                 observationClass=CompleteObservation,
                 rewardClass=FlatReward,
                 legalActClass=AllwaysLegal,
                 envClass=Environment,
                 gridStateclass=GridStateFromFile,
                 # type of chronics to use. For example GridStateFromFile if forecasts are not used, or GridStateFromFileWithForecasts otherwise
                 backendClass=PandaPowerBackend,
                 agentClass=DoNothingAgent,  # class used to build the agent
                 agentInstance=None,
                 verbose=False,
                 gridStateclass_kwargs={},
                 thermal_limit_a=None
                 ):
        """
        Initialize the Runner.

        Parameters
        ----------
        init_grid_path: ``str``
            Mandatory; used to initialize :attr:`Runner.init_grid_path`.

        path_chron: ``str``
            Mandatory; path where to look for the chronics data, used to initialize :attr:`Runner.path_chron`.

        parameters_path: ``str`` or ``dict``, optional
            Used to initialize :attr:`Runner.parameters_path`. If it is a string, the parameters are assumed to be
            located at this path; if it is a dictionary, the parameters are built from that dictionary.

        names_chronics_to_backend: ``dict``, optional
            Used to initialize :attr:`Runner.names_chronics_to_backend`.

        actionClass: ``type``, optional
            Used to initialize :attr:`Runner.actionClass`.

        observationClass: ``type``, optional
            Used to initialize :attr:`Runner.observationClass`.

        rewardClass: ``type``, optional
            Used to initialize :attr:`Runner.rewardClass`. Defaults to :class:`grid2op.ConstantReward`, which
            **should not** be used to train or evaluate an agent; it is intended for debugging purposes.

        legalActClass: ``type``, optional
            Used to initialize :attr:`Runner.legalActClass`.

        envClass: ``type``, optional
            Used to initialize :attr:`Runner.envClass`.

        gridStateclass: ``type``, optional
            Used to initialize :attr:`Runner.gridStateclass`.

        backendClass: ``type``, optional
            Used to initialize :attr:`Runner.backendClass`.

        agentClass: ``type``, optional
            Used to initialize :attr:`Runner.agentClass`.

        agentInstance: :class:`grid2op.Agent.Agent`
            Used to initialize the agent. Note that only one of :attr:`agentClass` or :attr:`agentInstance` can be
            used at a time. If both of them are ``None``, or both of them are not ``None``, an error is thrown.

        verbose: ``bool``, optional
            Used to initialize :attr:`Runner.verbose`.

        thermal_limit_a: ``numpy.ndarray``
            The thermal limit for the environment (if any).

        """

        if not isinstance(envClass, type):
            raise Grid2OpException(
                "Parameter \"envClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(envClass)))
        if not issubclass(envClass, Environment):
            raise RuntimeError("Impossible to create a runner without an evnrionment derived from grid2op.Environement"
                               " class. Please modify \"envClass\" parameter.")
        self.envClass = envClass

        if not isinstance(actionClass, type):
            raise Grid2OpException(
                "Parameter \"actionClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(actionClass)))
        if not issubclass(actionClass, Action):
            raise RuntimeError("Impossible to create a runner without an action class derived from grid2op.Action. "
                               "Please modify \"actionClass\" parameter.")
        self.actionClass = actionClass

        if not isinstance(observationClass, type):
            raise Grid2OpException(
                "Parameter \"observationClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(observationClass)))
        if not issubclass(observationClass, Observation):
            raise RuntimeError("Impossible to create a runner without an observation class derived from "
                               "grid2op.Observation. Please modify \"observationClass\" parameter.")
        self.observationClass = observationClass

        if not isinstance(rewardClass, type):
            raise Grid2OpException(
                "Parameter \"rewardClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(rewardClass)))
        if not issubclass(rewardClass, Reward):
            raise RuntimeError("Impossible to create a runner without a reward class derived from "
                               "grid2op.Reward. Please modify \"rewardClass\" parameter.")
        self.rewardClass = rewardClass

        if not isinstance(gridStateclass, type):
            raise Grid2OpException(
                "Parameter \"gridStateclass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(gridStateclass)))
        if not issubclass(gridStateclass, GridValue):
            raise RuntimeError("Impossible to create a runner without an chronics class derived from "
                               "grid2op.GridValue. Please modify \"gridStateclass\" parameter.")
        self.gridStateclass = gridStateclass

        if not isinstance(legalActClass, type):
            raise Grid2OpException(
                "Parameter \"legalActClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(legalActClass)))
        if not issubclass(legalActClass, LegalAction):
            raise RuntimeError("Impossible to create a runner without a class defining legal actions derived "
                               "from grid2op.LegalAction. Please modify \"legalActClass\" parameter.")
        self.legalActClass = legalActClass

        if not isinstance(backendClass, type):
            raise Grid2OpException(
                "Parameter \"backendClass\" used to build the Runner should be a type (a class) and not an object "
                "(an instance of a class). It is currently \"{}\"".format(
                    type(backendClass)))
        if not issubclass(backendClass, Backend):
            raise RuntimeError("Impossible to create a runner without a backend class derived from grid2op.Backend. "
                               "Please modify \"backendClass\" parameter.")
        self.backendClass = backendClass

        if agentClass is not None:
            if agentInstance is not None:
                raise RuntimeError("Impossible to build the backend. Only one of AgentClass or agentInstance can be "
                                   "used (both are not None).")
            if not isinstance(agentClass, type):
                raise Grid2OpException(
                    "Parameter \"agentClass\" used to build the Runner should be a type (a class) and not an object "
                    "(an instance of a class). It is currently \"{}\"".format(
                        type(agentClass)))
            if not issubclass(agentClass, Agent):
                raise RuntimeError("Impossible to create a runner without an agent class derived from grid2op.Agent. "
                                   "Please modify \"agentClass\" parameter.")
            self.agentClass = agentClass
            self._useclass = True
            self.agent = None
        elif agentInstance is not None:
            if not isinstance(agentInstance, Agent):
                raise RuntimeError("Impossible to create a runner without an agent class derived from grid2op.Agent. "
                                   "Please modify \"agentInstance\" parameter.")
            self.agentClass = None
            self._useclass = False
            self.agent = agentInstance
        else:
            raise RuntimeError("Impossible to build the backend. Either AgentClass or agentInstance must be provided "
                               "and both are None.")

        self.logger = ConsoleLog(
            DoNothingLog.INFO if verbose else DoNothingLog.ERROR)

        # store _parameters
        self.init_grid_path = init_grid_path
        self.names_chronics_to_backend = names_chronics_to_backend

        # game _parameters
        if isinstance(parameters_path, str):
            self.parameters_path = parameters_path
            self.parameters = Parameters(parameters_path)
        elif isinstance(parameters_path, dict):
            self.parameters = Parameters()
            self.parameters.init_from_dict(parameters_path)
        elif parameters_path is None:
            self.parameters_path = parameters_path
            self.parameters = Parameters()
        else:
            raise RuntimeError("Impossible to build the parameters. The argument \"parameters_path\" should either"
                               "be a string or a dictionary.")

        # chronics of grid state
        self.path_chron = path_chron
        self.gridStateclass_kwargs = gridStateclass_kwargs
        self.chronics_handler = ChronicsHandler(chronicsClass=self.gridStateclass,
                                                path=self.path_chron,
                                                **self.gridStateclass_kwargs)

        # the backend, used to compute powerflows
        self.backend = self.backendClass()

        # build the environment
        self.env = None

        self.verbose = verbose

        self.thermal_limit_a = thermal_limit_a
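A minimal construction sketch based on the docstring above: the grid path reuses the example given in the signature comment, the chronics folder is hypothetical, and the dictionary form of parameters_path exercises the Parameters.init_from_dict branch:

runner = Runner(
    init_grid_path="./data/test_Pandapower/case14.json",
    path_chron="./data/chronics",   # hypothetical chronics folder
    parameters_path={},             # dict form: converted via Parameters.init_from_dict
    agentClass=DoNothingAgent,      # exactly one of agentClass / agentInstance must be provided
)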
import sys
import os
import re
import codecs

from collections import defaultdict
from StatisticalCorpus import StatisticalCorpus
from Parameters import Parameters
from Seeds import Seeds
from Accents import Accents

temp_folder = '../Temp/'
stat_corpus = '../Data/Corpus/Statistical/'
stat_temp = temp_folder+'Statistical/'
output_folder = '../Data/Output/'
parameters = Parameters()
max_qty_terms = parameters.getMaxQtyTerms()
seeds = Seeds()
list_seeds = seeds.getSeeds()
accents = Accents()

def mainscript():
	StatisticalCorpus()
	executeMutualInformation('Full')
	executeMutualInformation('Noun')
	getThesaurusFromSeeds('Full')
	getThesaurusFromSeeds('Noun')

def executeMutualInformation(typefile):
	command = 'count.pl --ngram 2 --window '+str(parameters.getWindowSize())+' '+stat_temp+'W'+str(parameters.getWindowSize())+'_'+typefile+'StatisticalCorpus.txt '+stat_corpus+''+typefile+'StatisticalCorpus.txt'
	os.system(command)
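As a worked example of the command string above, with typefile='Full' and a window size of 5 (the real value comes from parameters.getWindowSize()), the call to os.system receives:

count.pl --ngram 2 --window 5 ../Temp/Statistical/W5_FullStatisticalCorpus.txt ../Data/Corpus/Statistical/FullStatisticalCorpus.txt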
Exemplo n.º 26
0
def main(argv):
	parameters = Parameters(argv)
	hostname = parameters.getHostname()
	port = parameters.getPort()
	dbname = parameters.getDBName()
	language_1, language_2 = parameters.getLanguage()
	collection = parameters.getCollection()
	input_folder = parameters.getInputFolder()
	type_corpus = parameters.getType()

	print 'Using parameters of configuration: '
	print '- Host : ',hostname
	print '- Port : ',port
	print '- Coll : ',collection
	print '- DBase: ',dbname
	print '- Input: ',input_folder

	database = Mongo(hostname, dbname, collection)	

	try:
		root, dirs, files = os.walk(input_folder+''+language_1+'/').next()[:3]
	except IOError:
		print 'ERROR: It was not possible to open the '+input_folder+''+language_1+'/ folder'
		sys.exit(2)
		
	for corpus_file in files:
		#if (corpus_file ~ "/~/$"):
		if not '.txt~' in corpus_file:
			print 'Working on file: '+corpus_file
			id_file_1 = language_1+'_'+corpus_file[0:-4]
			id_file_2 = language_2+'_'+corpus_file[0:-4]

			try:
				file_1 = codecs.open(input_folder+''+language_1+'/'+corpus_file, 'r', 'utf-8')
			except IOError:
				print 'ERROR: System cannot open the '+root+''+corpus_file+' file'
				sys.exit(2)
			try:
				file_2 = codecs.open(input_folder+''+language_2+'/'+corpus_file, 'r', 'utf-8')
			except IOError:
				print 'ERROR: System cannot open the '+root+'../'+language_2+'/'+corpus_file+' file'
				sys.exit(2)
	
			#Sentences indexed by the number of the line : number_line = _id (sentence)
			line_number = 1
			lines_2 = file_2.readlines()
			content_1 = ''
			content_2 = ''
			for counter, line in enumerate(file_1):
				if re.match('(^<)', line):
					if content_1 != '' and content_2 != '':
						if not database.exists(language_1, id_file_1) and not database.exists(language_2, id_file_2):
							database.insertNewData(language_1, id_file_1, content_1, language_2, id_file_2, content_2, type_corpus, line_number)
						else:
							if database.existsSentence(language_1, id_file_1, line_number):
								if not database.existsSentence(language_2, id_file_2, line_number):
									database.insertInExistingSentence(language_1, id_file_1, language_2, id_file_2, content_2, line_number)
							else:
								if database.existsSentence(language_2, id_file_2, line_number):
									database.insertInExistingSentence(language_2, id_file_2, language_1, id_file_1, content_1, line_number)
								else:
									database.insertNewSentence(language_1, id_file_1, content_1, language_2, id_file_2, content_2, line_number)
						line_number += 1
						content_1 = ''
						content_2 = ''
					if (line_number % 100 == 0):
						print 'Indexing line: ',line_number
				else:
					content_1 += line
					content_2 += lines_2[counter]
	
		file_1.close()
		file_2.close()
test_images_src = './test_images'
test_images_des = 'test_images_results'

test_vedios_src = './test_vedios'
test_vedios_des = './output_vedios'

# Calibration
logger_obj = Logger(str(results_directory))
calibration_obj = Calibration(calibration_src_images, calibration_dst_images,
                              (9, 6), logger_obj)
## Get calibration data
camera_matrix, distortion_coefficent = calibration_obj.get_calibration_parameters(
)

## Get Parameters
pipeline_params = Parameters(logger_obj, camera_matrix, distortion_coefficent)

# Get Pipeline object
Runner = Pipeline(pipeline_params)


def main():
    # Images
    Runner.process_test_images(test_images_src)

    # Project video
    Runner.process_test_vedio(test_vedios_src, 'project_video',
                              test_vedios_des)


if __name__ == "__main__":
Exemplo n.º 28
0
def main(type_atc, argv):
	date_start = datetime.datetime.now()
	date_start = date_start.strftime("%Y-%m-%d %H:%M:%S")
	
	parameters = Parameters(type_atc, argv)
	contexts = parameters.getContexts()
	input_folder = parameters.getInputFolder()
	language = parameters.getLanguage()
	min_word_size = parameters.getMinWordSize()
	max_qty_terms = int(parameters.getMaxQtyTerms())
	output_folder = parameters.getOutputFolder()
	temp_folder = parameters.getTempFolder()
	record_log = parameters.getRecordLog()
	record_intermediate = parameters.getRecordIntermediate()
	seeds_file = parameters.getSeedsFile()
	stoplist_file = parameters.getStoplistFile()
	sim_measure = parameters.getSimilarityMeasure()
	del parameters

	logfile = LogFile(record_log, str(date_start), None, input_folder, language, stoplist_file, min_word_size, max_qty_terms, None, output_folder, None, temp_folder, seeds_file, sim_measure)

	if contexts:
		logfile.writeLogfile('- Building syntactic relations from '+temp_folder)
		contexts = Contexts(temp_folder)
		del contexts
	else:
		logfile.writeLogfile('- Building syntactic relations from '+input_folder)
		ling_corpus = StanfordSyntacticContexts(input_folder, temp_folder, stoplist_file, min_word_size, record_intermediate)
		del ling_corpus

	logfile.writeLogfile('- Merging terms to '+temp_folder+'Relations2ndOrder.txt')

	command = 'cat '+temp_folder+'AN_Relations.txt '+temp_folder+'SV_Relations.txt '+temp_folder+'VO_Relations.txt '+' > '+temp_folder+'Relations2ndOrder.txt'
	os.system(command)

	logfile.writeLogfile('- Calculating similarity using '+sim_measure)
	measures = Measures(temp_folder+'Relations2ndOrder.txt', seeds_file)
	dic_topn = measures.getTopNToAllSeeds(sim_measure, max_qty_terms)
	del measures

	logfile.writeLogfile('- Building thesaurus in '+output_folder+'T_'+type_atc+'_'+sim_measure+'.xml')

	thesaurus = Thesaurus(output_folder+'T_'+type_atc+'_'+sim_measure+'.xml',max_qty_terms)
	thesaurus.write(dic_topn)
	del thesaurus

	date_end = datetime.datetime.now()
	date_end = date_end.strftime("%Y-%m-%d %H:%M:%S")
	logfile.writeLogfile('- Thesaurus successfully built!\nEnding process at: '+str(date_end)+'.\n')
	del logfile
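For concreteness, if temp_folder were '../Temp/' (the value used elsewhere in this listing), the merge command above would expand to the following (the doubled space before '>' comes from the string concatenation and is harmless):

cat ../Temp/AN_Relations.txt ../Temp/SV_Relations.txt ../Temp/VO_Relations.txt  > ../Temp/Relations2ndOrder.txt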
Exemplo n.º 29
0
def main(type_atc, argv):
    list_relations = ['AN', 'SV', 'VO']

    date_start = datetime.datetime.now()
    date_start = date_start.strftime("%Y-%m-%d %H:%M:%S")

    parameters = Parameters(type_atc, argv)
    contexts = parameters.getContexts()
    svd_dimension = int(parameters.getSvdDimension())
    input_folder = parameters.getInputFolder()
    language = parameters.getLanguage()
    min_word_size = parameters.getMinWordSize()
    max_qty_terms = int(parameters.getMaxQtyTerms())
    output_folder = parameters.getOutputFolder()
    temp_folder = parameters.getTempFolder()
    record_log = parameters.getRecordLog()
    record_intermediate = parameters.getRecordIntermediate()
    seeds_file = parameters.getSeedsFile()
    stoplist_file = parameters.getStoplistFile()
    sim_measure = parameters.getSimilarityMeasure()
    del parameters

    logfile = LogFile(record_log, str(date_start), svd_dimension, input_folder,
                      language, stoplist_file, min_word_size, max_qty_terms,
                      None, output_folder, None, temp_folder, seeds_file,
                      sim_measure)

    #if contexts:
    #	logfile.writeLogfile('- Building syntactics relations from '+temp_folder)
    #	contexts = Contexts(temp_folder)
    #	del contexts
    #else:
    #	logfile.writeLogfile('- Building syntactics relations from '+input_folder)
    #	ling_corpus = StanfordSyntacticContexts(input_folder, temp_folder, stoplist_file, min_word_size, record_intermediate)
    #	del ling_corpus

    matrix_relation = Matrix(temp_folder, svd_dimension, record_intermediate)
    del matrix_relation

    #similarities = Similarities(seeds_file, temp_folder, 'cosine')
    #dic_topn = similarities.getTopNOrderedDic(10)
    #del Similarities

    #logfile.writeLogfile('- Building thesaurus in '+output_folder+'T_'+type_atc+'_'+sim_measure+'.xml')
    #thesaurus = Thesaurus(output_folder+'T_'+type_atc+'_'+sim_measure+'.xml',max_qty_terms)
    #thesaurus.write(dic_topn)
    #del thesaurus

    date_end = datetime.datetime.now()
    date_end = date_end.strftime("%Y-%m-%d %H:%M:%S")
    logfile.writeLogfile(
        '- Thesaurus successfully built!\nEnding process at: ' + str(date_end) +
        '.\n')
    del logfile
Exemplo n.º 30
0
import os
import sys

from java.awt import Panel, Dimension
from java.lang import StringBuilder

script_path = os.path.dirname(os.path.realpath(__file__))
if "Fiji.app" in script_path:
    ss = script_path.split("Fiji.app")
    final_folder = os.path.basename(script_path)
    script_path = os.path.join(ss[0], "Fiji.app", "plugins", "Scripts",
                               "Plugins", final_folder)
sys.path.insert(0, os.path.join(script_path, 'modules'))
sys.path.insert(0, os.path.join(script_path, 'classes'))

from Parameters import Parameters

readme_fpath = os.path.join(script_path, "README.txt")
params = Parameters()

title = "Membrane Blebbing version " + Parameters._version_string

f = None
try:
    f = open(readme_fpath, "rb")
    text = f.readlines()
except:
    raise IOError("Error reading README.txt")
finally:
    if f is not None:
        f.close()

sb = StringBuilder()
for line in text:
    sb.append(line)
Exemplo n.º 31
0
def __init__(self):
    Parameters.__init__(self)
    self.registerFloatParameter("rho", "Density value")
    self.registerFloatParameter("p", "Pressure value")
    self.registerFloatParameter("T", "Temperature value")
Exemplo n.º 32
0
if __name__ == '__main__':
    args = parse_args()
    print(args)
    filepath = args.path + args.dataset
    result_file = args.res_file
    cdcf_flag = True

    # Data processing
    t1 = time()
    print('Data loading...')
    dataset = Dataset(filepath, cdcf_flag)

    user_input, item_input, train_rating, train_domain = dataset.trainArrQuadruplets
    valid_user_input, valid_item_input, valid_rating, valid_domain = dataset.validArrQuadruplets
    test_user_input, test_item_input, test_rating, test_domain = dataset.testArrQuadruplets
    params = Parameters(args, dataset)

    #cdcf
    user_cdcf_input = params.num_users * train_domain + user_input
    valid_user_cdcf_input = params.num_users * valid_domain + valid_user_input
    test_user_cdcf_input = params.num_users * test_domain + test_user_input

    params.set_input(user_input, item_input, train_rating, train_domain,
                     user_cdcf_input, valid_user_input, valid_item_input,
                     valid_rating, valid_domain, valid_user_cdcf_input,
                     test_user_input, test_item_input, test_rating,
                     test_domain, test_user_cdcf_input)

    print(
        """Load data done [%.1f s]. #user:%d, #item:%d, #src_item:%d,#tar_item:%d,
          #train:%d, #test:%d, #valid:%d""" %
Exemplo n.º 33
0
def __init__(self):
    Parameters.__init__(self)
Exemplo n.º 34
0
    def init(self, rootfolder, cli=False, conditions=None):
        """
        Initialize dependencies.
        :param rootfolder: full path to the root folder
        :type rootfolder: str
        :param cli: run in command-line mode if True
        :type cli: bool
        :param conditions: dictionary providing experiment conditions (to be used instead of conditions.json)
        :type conditions: dict
        """
        # Get experiment
        self.experimentsFolder = "{}/experiments/".format(rootfolder)
        self.expname = self.__get_experiment(self.experimentsFolder, cli=cli)

        # Get logger
        if not isdir('{}/logs'.format(rootfolder)):
            mkdir('{}/logs'.format(rootfolder))
        self.logger = self.get_logger(rootfolder, self.expname)

        # Import experiment
        self.__import_experiment(self.experimentsFolder, self.expname)
        self.exp_version = RunTrial.version if hasattr(RunTrial,
                                                       'version') else '1.0.0'

        # Print welcome message
        print("\n##############################\n")
        self.logger.info("# Welcome to {} (version {})".format(
            self.appname, v.__version__))
        self.logger.info("# {}".format(v.__copyright__))
        self.logger.info("# Date: {}".format(
            time.strftime("%d-%m-%y %H:%M:%S")))
        self.logger.info("# Experiment: {} (version {})".format(
            self.expname, self.exp_version))
        print("\n##############################\n")

        # Get and set configuration
        self.config = Config(rootfolder=rootfolder,
                             expname=self.expname,
                             cli=cli)
        self.config.setup()

        self.settings = self.config.settings
        self.folders = self.config.folders
        self.files = self.config.files

        # Get experiment parameters
        self.parameters = Parameters(self.files['parameters'])

        # Create user
        self.user = User(data_folder=self.folders['data'],
                         expname='{}_v{}'.format(
                             self.expname, self.exp_version.replace('.', '-')),
                         **self.__filter_args(User, self.settings['setup']))
        self.user.setup(cli=cli)

        self.files['design'] = self.user.designfile

        # Make factorial design
        self.design = Design(conditionfile=self.files['conditions'],
                             userfile=self.files['design'],
                             folder=self.folders['expFolder'],
                             conditions=conditions,
                             **self.__filter_args(Design,
                                                  self.settings['setup']))
        self.design.make()

        # Devices
        self.devices = Devices(exp_folder=self.folders['expFolder'],
                               base_name=self.user.dftName,
                               cli=cli)

        # Screen
        self.screen = Screen(expfolder=self.folders['expFolder'],
                             expname=self.expname,
                             **self.__filter_args(Screen,
                                                  self.settings['display']))
Exemplo n.º 35
0
def sanitization_generation_metrics(feature_order=None,
                                    alpha_=P.Alpha,
                                    lambda_=P.Lambda,
                                    san_loss=P.SanLoss,
                                    pred_loss=P.PredLoss,
                                    disc_loss=P.DiscLoss,
                                    max_epoch=P.Epoch,
                                    k_pred=P.KPred,
                                    k_disc=P.KDisc,
                                    scale=P.Scale):

    # Return models and datasets

    # Take the first 70% timestep as training.
    train_prep = D.Preprocessing(P.TrainPath,
                                 prep_excluded=P.PreprocessingExcluded,
                                 scale=P.Scale,
                                 prep_included=P.PreprocessingIncluded)
    train_prep.set_features_ordering(feature_order)
    train_prep.fit_transform()
    test_prep = D.Preprocessing(P.TestPath,
                                prep_excluded=P.PreprocessingExcluded,
                                scale=P.Scale,
                                prep_included=P.PreprocessingIncluded)
    test_prep.set_features_ordering(feature_order)
    test_prep.fit_transform()
    train_ds = D.MotionSenseDataset(train_prep,
                                    window_overlap=P.Window_overlap)
    test_ds = D.MotionSenseDataset(test_prep, window_overlap=P.Window_overlap)

    # Shape of unique values
    uniq_act = np.unique(train_ds.activities)
    uniq_sens = np.unique(train_ds.sensitive)
    uniq_uid = np.unique(train_ds.users_id)
    phys_cols = train_ds.phy_data.shape[1]
    try:
        act_cols = train_ds.activities.shape[1]
    except IndexError:
        act_cols = 1

    # Discriminator target
    disc_target_values = uniq_sens
    pred_target_values = uniq_act

    # Load dataset
    # Create dataloader
    # build data loaders
    batch_size = P.BatchSize
    s_train_dl = data.DataLoader(train_ds,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=4)
    d_train_dl = data.DataLoader(train_ds.copy(True),
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=4)
    d_dl_iter = iter(d_train_dl)
    p_train_dl = data.DataLoader(train_ds.copy(True),
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=4)
    p_dl_iter = iter(p_train_dl)

    # Create models:

    sanitizer = M.SanitizerConv(input_channels=train_ds.input_channels,
                                seq_len=train_ds.seq_len,
                                kernel_sizes=[5, 5],
                                strides=[1, 1],
                                conv_paddings=[0, 0],
                                phyNodes=phys_cols,
                                noiseNodes=P.NoiseNodes,
                                actNodes=act_cols)

    # Adding the physiological data can keep the sensor information from remaining dependent on that attribute,
    # because the discriminator cannot predict the sensitive value even when height, weight and the other
    # attributes are given as input. Put differently, if an attribute is strongly correlated with the target,
    # the model will find that correlation. Example: train a model whose input already contains the target
    # column; it will learn to disregard every other column.
    # The predictor output must have the shape expected by NLLLoss (the model output is a matrix of class
    # scores while the target is a vector of class indices).
    def get_models(input_channels=train_ds.input_channels,
                   seq_len=train_ds.seq_len,
                   pred_out_size=pred_target_values.shape[0],
                   disc_out_size=disc_target_values.shape[0],
                   phys_cols=phys_cols,
                   act_cols=act_cols):
        predictor = M.PredictorConv(input_channels=input_channels,
                                    seq_len=seq_len,
                                    output_size=pred_out_size,
                                    physNodes=phys_cols)
        predictor.to(DEVICE)
        pred_optim = M.get_optimizer(predictor, )

        discriminator = M.DiscriminatorConv(input_channels=input_channels,
                                            seq_len=seq_len,
                                            output_size=disc_out_size,
                                            physNodes=phys_cols + act_cols)
        discriminator.to(DEVICE)
        disc_optim = M.get_optimizer(discriminator, )
        return predictor, pred_optim, discriminator, disc_optim

    def reset_weights(m):
        try:
            m.reset_parameters()
        except AttributeError as e:
            pass
            # print(e)
            # print("Layer not affected")

    predictor, pred_optim, discriminator, disc_optim = get_models()
    # Send models on GPU or CPU
    sanitizer.to(DEVICE)

    # Check the latest Epoch to start sanitization
    start_epoch = M.get_latest_states(P.ModelsDir(),
                                      sanitizer,
                                      discriminator,
                                      predictor,
                                      otherParamFn=P.ParamFunction)

    # Initialise losses
    san_loss = Cl.SanitizerBerLoss(alpha_=alpha_,
                                   lambda_=lambda_,
                                   recOn=P.RecOn,
                                   optim_type=P.OptimType,
                                   device=DEVICE)
    # pred_loss = Cl.AccuracyLoss(device=DEVICE)
    pred_loss = Cl.BalancedErrorRateLoss(targetBer=0, device=DEVICE)
    disc_loss = Cl.BalancedErrorRateLoss(targetBer=0, device=DEVICE)

    # Optimizers
    san_optim = M.get_optimizer(sanitizer, )

    losses_frame_path = "{}/{}.csv".format(P.ModelsDir(),
                                           P.ParamFunction("losses"))
    san_losses = [[], [], []]
    disc_losses = []
    pred_losses = []
    if (start_epoch > 1) and tryReading(losses_frame_path):
        losses_frame = pd.read_csv(losses_frame_path)
        disc_losses = losses_frame["disc"].values.tolist()
        pred_losses = losses_frame["pred"].values.tolist()
        san_losses = losses_frame.drop(["pred", "disc"],
                                       axis=1).T.values.tolist()

    # Activity-handling helpers: the activities are concatenated to the input for the discriminator (act_fn_disc) and ignored for the predictor (act_fn_pred)
    act_fn_disc = lambda ps, act: torch.cat(
        (ps, act * P.DecorrelateActAndSens), 1)
    act_fn_pred = lambda ps, act: ps

    # Init figure
    fig = "asdfoijbnad"
    plt.figure(fig, figsize=(14, 14))

    # Sanitize
    print("Starting Sanitizing ......>")
    for epoch in tqdm.tqdm(range(start_epoch, max_epoch + 1)):
        print("Current Epoch: {}".format(epoch))
        if P.TrainingResetModelsStates:
            predictor.apply(reset_weights)
            discriminator.apply(reset_weights)

            # del predictor
            # del discriminator
            # del disc_optim
            # del pred_optim
            # predictor, pred_optim, discriminator, disc_optim = get_models()

        for sample in s_train_dl:

            # Train the sanitizer
            l = train_sanitizer(
                sample,
                sanitizer,
                discriminator,
                predictor,
                san_loss,
                san_optim,
                act_fn=act_fn_disc,
                act_select=P.ActivitySelection,
                phys_select=P.PhysiologSelection,
                phys=P.PhysInput,
                san_acts=P.SanitizeActivities,
            )
            san_losses[0].append(
                l[0].mean().to(CPU_DEVICE).data.numpy().reshape(-1)[0])
            san_losses[1].append(
                l[1].to(CPU_DEVICE).data.numpy().reshape(-1)[0])
            san_losses[2].append(
                l[2].to(CPU_DEVICE).data.numpy().reshape(-1)[0])

            # Train the predictor
            l, p_dl_iter = train_predictor(
                pred_losses,
                k_pred,
                sanitizer,
                predictor,
                p_train_dl,
                p_dl_iter,
                pred_loss,
                pred_optim,
                act_fn=act_fn_pred,
                act_select=P.ActivitySelection,
                phys_select=P.PhysiologSelection,
                target_key="act",
                sens_key="sens",
                phys=P.PhysInput,
                san_acts=P.SanitizeActivities,
            )
            pred_losses.append(l.to(CPU_DEVICE).data.numpy().reshape(-1)[0])
            # Train the discriminator
            l, d_dl_iter = train_predictor(
                disc_losses,
                k_pred,
                sanitizer,
                discriminator,
                d_train_dl,
                d_dl_iter,
                disc_loss,
                disc_optim,
                act_fn=act_fn_disc,
                act_select=P.ActivitySelection,
                phys_select=P.PhysiologSelection,
                target_key="sens",
                sens_key="sens",
                phys=P.PhysInput,
                san_acts=P.SanitizeActivities,
            )
            disc_losses.append(l.to(CPU_DEVICE).data.numpy().reshape(-1)[0])

        print("***")
        # Save losses, and models states.
        # Saving models States.
        M.save_classifier_states(sanitizer,
                                 epoch,
                                 P.ModelsDir(),
                                 otherParamFn=P.ParamFunction,
                                 ext="S")
        M.save_classifier_states(discriminator,
                                 epoch,
                                 P.ModelsDir(),
                                 otherParamFn=P.ParamFunction,
                                 ext="D")
        M.save_classifier_states(predictor,
                                 epoch,
                                 P.ModelsDir(),
                                 otherParamFn=P.ParamFunction,
                                 ext="P")
        # Saving and plotting losses
        losses_frame = pd.DataFrame.from_dict({
            "san_rec": san_losses[0],
            "san_act": san_losses[1],
            "san_sens": san_losses[2],
            "disc": disc_losses,
            "pred": pred_losses,
        })
        losses_frame.to_csv(losses_frame_path, index=False)
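        # Map the stored sanitizer-vs-discriminator term back to its true loss value before plotting.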
        losses_frame["san_sens"] = san_loss.disc_loss.get_true_value(
            losses_frame["san_sens"].values)
        if epoch % P.PlotRate == 0:
            plt.subplot(5, 1, 1)
            sns.lineplot(x="index",
                         y="san_rec",
                         data=losses_frame.reset_index())
            plt.subplot(5, 1, 2)
            sns.lineplot(x="index",
                         y="san_act",
                         data=losses_frame.reset_index())
            plt.subplot(5, 1, 3)
            sns.lineplot(x="index",
                         y="san_sens",
                         data=losses_frame.reset_index())
            plt.subplot(5, 1, 4)
            sns.lineplot(x="index", y="disc", data=losses_frame.reset_index())
            plt.subplot(5, 1, 5)
            sns.lineplot(x="index", y="pred", data=losses_frame.reset_index())
            plt.savefig("{}/{}.png".format(P.FiguresDir(),
                                           P.ParamFunction("losses")))
            plt.clf()

    # Check datasets and generate


# def generate_dataset(san, train_prep, train_ds, train_dl, test_prep, test_ds, test_dl, gen_path, train_id="train",
#                      test_id="test", max_epoch=0, addParamFn=None, phys=1, san_acts=1, san_phys=1):

    print("Generating Sanitized Datasets")
    generate_dataset(sanitizer,
                     train_prep,
                     train_ds,
                     test_prep,
                     test_ds,
                     P.GenDataDir(),
                     max_epoch=P.Epoch,
                     addParamFn=P.ParamFunction,
                     phys=P.PhysInput,
                     san_acts=P.SanitizeActivities,
                     san_phys=P.SanitizePhysio)
    # Check if everything has been correctly generated

    #print("Computing Metrics")
    # If device == cpu_device, we should not use the GPU, as one may not be available
    """metrics_computation(input_channels=train_ds.input_channels, seq_len=train_ds.seq_len,
Exemplo n.º 36
0
    def setUp(self):
        # powergrid
        self.backend = PandaPowerBackend()
        self.path_matpower = PATH_DATA_TEST_PP
        self.case_file = "test_case14.json"

        # chronics
        self.path_chron = os.path.join(PATH_CHRONICS, "chronics")
        self.chronics_handler = ChronicsHandler(
            chronicsClass=GridStateFromFile, path=self.path_chron)

        self.tolvect = 1e-2
        self.tol_one = 1e-5
        self.id_chron_to_back_load = np.array(
            [0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9])

        # force the verbose backend
        self.backend.detailed_infos_for_cascading_failures = True

        self.names_chronics_to_backend = {
            "loads": {
                "2_C-10.61": 'load_1_0',
                "3_C151.15": 'load_2_1',
                "14_C63.6": 'load_13_2',
                "4_C-9.47": 'load_3_3',
                "5_C201.84": 'load_4_4',
                "6_C-6.27": 'load_5_5',
                "9_C130.49": 'load_8_6',
                "10_C228.66": 'load_9_7',
                "11_C-138.89": 'load_10_8',
                "12_C-27.88": 'load_11_9',
                "13_C-13.33": 'load_12_10'
            },
            "lines": {
                '1_2_1': '0_1_0',
                '1_5_2': '0_4_1',
                '9_10_16': '8_9_2',
                '9_14_17': '8_13_3',
                '10_11_18': '9_10_4',
                '12_13_19': '11_12_5',
                '13_14_20': '12_13_6',
                '2_3_3': '1_2_7',
                '2_4_4': '1_3_8',
                '2_5_5': '1_4_9',
                '3_4_6': '2_3_10',
                '4_5_7': '3_4_11',
                '6_11_11': '5_10_12',
                '6_12_12': '5_11_13',
                '6_13_13': '5_12_14',
                '4_7_8': '3_6_15',
                '4_9_9': '3_8_16',
                '5_6_10': '4_5_17',
                '7_8_14': '6_7_18',
                '7_9_15': '6_8_19'
            },
            "prods": {
                "1_G137.1": 'gen_0_4',
                "3_G36.31": "gen_2_1",
                "6_G63.29": "gen_5_2",
                "2_G-56.47": "gen_1_0",
                "8_G40.43": "gen_7_3"
            },
        }

        # _parameters for the environment
        self.env_params = Parameters()

        self.env = Environment(
            init_grid_path=os.path.join(self.path_matpower, self.case_file),
            backend=self.backend,
            chronics_handler=self.chronics_handler,
            parameters=self.env_params,
            names_chronics_to_backend=self.names_chronics_to_backend)
Exemplo n.º 37
0
def metrics_computation(input_channels,
                        seq_len,
                        features_order,
                        kernel_size=[5, 5],
                        strides=[1, 1],
                        conv_paddings=[0, 0],
                        gpu=True,
                        gpu_device="cuda:0",
                        gen_path=P.GenDataDir(),
                        alpha_=P.Alpha,
                        lambda_=P.Lambda,
                        train_id="train",
                        test_id="test",
                        epoch=P.Epoch,
                        addParamFn=P.ParamFunction,
                        seed=42,
                        verbose=False):
    """
    """
    # Add restarting mechanism.
    metrics = Me.Metrics(input_channels=input_channels,
                         seq_len=seq_len,
                         kernel_sizes=kernel_size,
                         strides=strides,
                         conv_paddings=conv_paddings,
                         gpu=gpu,
                         distance_metric=P.DistanceMetric,
                         gpu_device=gpu_device,
                         prep_included=P.PreprocessingIncluded,
                         prep_excluded=P.PreprocessingExcluded,
                         scale=P.Scale,
                         features_ordering=features_order,
                         seed=seed,
                         verbose=verbose,
                         data_fmt_class="MotionSenseDataset",
                         window_overlap=P.Window_overlap)
    results = R.Results(resultDir=P.ResultsDir())
    sr = R.StopRestart(resultDir=P.ResultsDir())

    # Read data and compute metrics,
    # Baseline
    metric_epochs = 200
    metric_batch = 256

    def shaping(path):
        # Set the data as the same for all, the generated ones and the original ones such that we have the same
        # computation graph.
        data = D.MotionSenseDataset(path, window_overlap=P.Window_overlap)
        return data.__inverse_transform_conv__(sensor_tensor=data.sensor,
                                               phy=data.phy_data,
                                               act_tensor=data.activities,
                                               sens_tensor=data.sensitive,
                                               user_id_tensor=data.users_id,
                                               trials=data.trials,
                                               cpu_device=CPU_DEVICE)
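    # Reference (unsanitized) test set, reused below as the distance baseline for every sanitized epoch.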

    o_test = shaping(P.TestPath)
    if not sr.computed(epoch=0, alpha_=np.NaN, lambda_=np.NaN, Attribute="act") or \
        not sr.computed(epoch=0, alpha_=np.NaN, lambda_=np.NaN, Attribute="gender"):
        o_train = shaping(P.TrainPath)

        sp = metrics.sensitive_attribute(
            train_set=o_train,
            test_set=o_test,
            use_accuracy=False,
            act_name="act",
            drop=[],
            sens_name="gender",
            ms_act_name="act",
            ms_sens_name="sens",
            sklearn_data_process=None,
            use_phys=True,
            physNodes=3,
            phys_names=["height", "weight", "age"],
            ms_phys_name="phy",
            epoch=metric_epochs,
            batch_size=metric_batch,
            loss_fn=None)
        tp = metrics.task(train_set=o_train,
                          test_set=o_test,
                          act_name="act",
                          drop=[],
                          sens_name="gender",
                          ms_act_name="act",
                          ms_sens_name="sens",
                          sklearn_data_process=None,
                          use_phys=True,
                          physNodes=3,
                          phys_names=["height", "weight", "age"],
                          ms_phys_name="phy",
                          epoch=metric_epochs,
                          batch_size=metric_batch,
                          loss_fn=None)
        di = metrics.distance(o_test, o_test)
        # Add and save results
        results.add_result(distance=di,
                           s_acc=sp[0],
                           s_ber=sp[1],
                           t_acc=tp[0],
                           t_ber=tp[1],
                           sens_name="gender",
                           act_name="act",
                           epoch=0,
                           alpha_=np.NaN,
                           lambda_=np.NaN)

    # Sanitization
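    # name(split, epoch) builds the path of a generated CSV file for the given split and epoch.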
    name = lambda n, e: "{}/{}_{}.csv".format(gen_path, n, addParamFn(e))
    for epoch in tqdm.tqdm(range(1, epoch + 1)):
        if not sr.computed(epoch=epoch, alpha_=alpha_, lambda_=lambda_, Attribute="act") or \
            not sr.computed(epoch=epoch, alpha_=alpha_, lambda_=lambda_, Attribute="gender"):
            train = shaping(name(train_id, epoch))
            test = shaping(name(test_id, epoch))
            sp = metrics.sensitive_attribute(
                train_set=train,
                test_set=test,
                use_accuracy=False,
                act_name="act",
                drop=[],
                sens_name="gender",
                ms_act_name="act",
                ms_sens_name="sens",
                sklearn_data_process=None,
                use_phys=True,
                physNodes=3,
                phys_names=["height", "weight", "age"],
                ms_phys_name="phy",
                epoch=metric_epochs,
                batch_size=metric_batch,
                loss_fn=None,
                learning_rate=5e-4,
                weight_decay=0)
            tp = metrics.task(train_set=train,
                              test_set=test,
                              act_name="act",
                              drop=[],
                              sens_name="gender",
                              ms_act_name="act",
                              ms_sens_name="sens",
                              sklearn_data_process=None,
                              use_phys=True,
                              physNodes=3,
                              phys_names=["height", "weight", "age"],
                              ms_phys_name="phy",
                              epoch=metric_epochs,
                              batch_size=metric_batch,
                              loss_fn=None,
                              learning_rate=5e-4,
                              weight_decay=0)
            di = metrics.distance(o_test_set=o_test, test_set=test)
            # Add and save results
            results.add_result(distance=di,
                               s_acc=sp[0],
                               s_ber=sp[1],
                               t_acc=tp[0],
                               t_ber=tp[1],
                               sens_name="gender",
                               act_name="act",
                               epoch=epoch,
                               alpha_=alpha_,
                               lambda_=lambda_)
import importlib
import os
import sys
from datetime import datetime

import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix

from utils.Dataset_hdf5 import DatasetHDF5
from utils.Dataset_hdf5_cv import DatasetHDF5_Kfold
from Parameters import Parameters

# ===============get basic folder=====================
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
para = Parameters()

# log
MODEL = importlib.import_module(para.model)  # import network module
LOG_DIR = para.logDir
LOG_MODEL = para.logmodelDir
LOG_FOUT = open(os.path.join(LOG_DIR, f'{para.expName}.txt'), 'w')
LOG_FOUT.write(str(para.__dict__) + '\n')

# set parameters
if para.gpu:
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
else:
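    # Hide all GPUs so TensorFlow falls back to the CPU.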
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
Exemplo n.º 39
0
    def setUp(self):
        """
        The case file is a representation of the case14 as found in the ieee14 powergrid.
        :return:
        """
        # from ADNBackend import ADNBackend
        # self.backend = ADNBackend()
        # self.path_matpower = "/home/donnotben/Documents/RL4Grid/RL4Grid/data"
        # self.case_file = "ieee14_ADN.xml"
        # self.backend.load_grid(self.path_matpower, self.case_file)
        self.tolvect = 1e-2
        self.tol_one = 1e-5
        self.game_rules = GameRules()
        # pdb.set_trace()
        self.rewardClass = L2RPNReward
        self.reward_helper = self.rewardClass()
        self.obsClass = CompleteObservation
        self.parameters = Parameters()

        # powergrid
        self.backend = PandaPowerBackend()
        self.path_matpower = PATH_DATA_TEST_PP
        self.case_file = "test_case14.json"

        # chronics
        self.path_chron = os.path.join(PATH_CHRONICS, "chronics_with_forecast")
        self.chronics_handler = ChronicsHandler(
            chronicsClass=GridStateFromFileWithForecasts, path=self.path_chron)

        self.tolvect = 1e-2
        self.tol_one = 1e-5
        self.id_chron_to_back_load = np.array(
            [0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9])

        # force the verbose backend
        self.backend.detailed_infos_for_cascading_failures = True

        self.names_chronics_to_backend = {
            "loads": {
                "2_C-10.61": 'load_1_0',
                "3_C151.15": 'load_2_1',
                "14_C63.6": 'load_13_2',
                "4_C-9.47": 'load_3_3',
                "5_C201.84": 'load_4_4',
                "6_C-6.27": 'load_5_5',
                "9_C130.49": 'load_8_6',
                "10_C228.66": 'load_9_7',
                "11_C-138.89": 'load_10_8',
                "12_C-27.88": 'load_11_9',
                "13_C-13.33": 'load_12_10'
            },
            "lines": {
                '1_2_1': '0_1_0',
                '1_5_2': '0_4_1',
                '9_10_16': '8_9_2',
                '9_14_17': '8_13_3',
                '10_11_18': '9_10_4',
                '12_13_19': '11_12_5',
                '13_14_20': '12_13_6',
                '2_3_3': '1_2_7',
                '2_4_4': '1_3_8',
                '2_5_5': '1_4_9',
                '3_4_6': '2_3_10',
                '4_5_7': '3_4_11',
                '6_11_11': '5_10_12',
                '6_12_12': '5_11_13',
                '6_13_13': '5_12_14',
                '4_7_8': '3_6_15',
                '4_9_9': '3_8_16',
                '5_6_10': '4_5_17',
                '7_8_14': '6_7_18',
                '7_9_15': '6_8_19'
            },
            "prods": {
                "1_G137.1": 'gen_0_4',
                "3_G36.31": "gen_2_1",
                "6_G63.29": "gen_5_2",
                "2_G-56.47": "gen_1_0",
                "8_G40.43": "gen_7_3"
            },
        }

        # _parameters for the environment
        self.env_params = Parameters()

        self.env = Environment(
            init_grid_path=os.path.join(self.path_matpower, self.case_file),
            backend=self.backend,
            chronics_handler=self.chronics_handler,
            parameters=self.env_params,
            names_chronics_to_backend=self.names_chronics_to_backend,
            rewardClass=self.rewardClass)
Exemplo n.º 40
0
import argparse

from Serializer import Serializer, serialize_data
from Vocabs import Vocabs, make_train_data
from Seq2Seq import Seq2Seq
from Parameters import Parameters
from MakeSeq import make_seq

if __name__ == u'__main__':

    # args
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', default=None, type=int)
    args = parser.parse_args()

    # calculation parameters
    parameters = Parameters()

    # get data
    vocabs = Vocabs()
    serializer = Serializer(vocabs)
    train_data = make_train_data(vocabs, 
                                 parameters.time_steps, 
                                 parameters.data_num)
    
    print('--- first 10 data ---')
    for data in train_data[0: 10]:
        print(''.join(data))

    train_data, result_data = serialize_data(serializer, train_data, 
                                             parameters.time_steps)
Exemplo n.º 41
0
print('wait for it...')

seed = 0

args = {
    'L': sp.L,
    'T': sp.T,
    'K': sp.K,
    'lambda_': sp.lambda_,
    'epsilon': sp.epsilon,
    'c': sp.c,
    'eta': sp.eta,
    'seed': seed,
}
p = Parameters(**args)

args = {
    'N': sc.N,
    'm': sc.m,
    'n': sc.n,
    'A': sc.A,
    'B': sc.B,
    'C': sc.C,
    'D': sc.D,
    'P': sc.P,
    'F': sc.F,
    'X': zeros_like(sc.X),
}
m = MJLS(**args)
Exemplo n.º 42
0
 def setParameters(self, name, destination):
     self.param_file = Parameters(name=self.name,destination=destination)
     self.writeParamHeaders()
     self.writeParamFilenames()
     self.closeParam()
def main():
    # ensure consistent preference settings
    Prefs.blackBackground = False

    # get locations for previous and new outputs
    params = Parameters()
    output_folder_old, output_folder = mbio.rerun_location_chooser(
        params.input_image_path)
    params.loadParametersFromJson(
        os.path.join(output_folder_old, 'parameters used.json'))
    params.setOutputPath(os.path.join(output_folder, "looped analysis"))
    os.makedirs(params.output_path)

    # prompt user for path to text file describing the looped analysis
    loop_file_path = mbio.input_file_location_chooser(
        params.output_path,
        filt='*.txt',
        message="Please choose a text file containing the looping data...")

    # get original image file (for overlays etc.)
    if not os.path.isfile(params.input_image_path):
        mbui.warning_dialog([
            "The original data can't be found at the location specified in saved parameters. ",
            "(Possibly something as simple as a change in network drive mapping has occurred)",
            "Please specify the location of the original image file..."
        ])
        params.setInputImagePath(
            mbio.input_file_location_chooser(params.output_path))

    import_opts, params = mbui.choose_series(params.input_image_path, params)
    imps = bf.openImagePlus(import_opts)
    imp = imps[0]

    if imp.getNSlices() > 1:
        mbui.warning_dialog([
            "More than one Z plane detected.",
            "I will do a maximum projection before proceeding", "Continue?"
        ])
        imp = ZProjector.run(imp, "max all")

    params = mbio.get_metadata(params)
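    # Round the curvature length to a whole number of pixels.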
    params.setCurvatureLengthUm(
        round(params.curvature_length_um / params.pixel_physical_size) *
        params.pixel_physical_size)
    params.persistParameters()
    IJ.run(imp, "Set Scale...", "distance=0 known=0 pixel=1 unit=pixel")
    imp.show()
    if imp.getNChannels() > 1:
        imp.setPosition(params.membrane_channel_number, 1, 1)
    mbui.autoset_zoom(imp)
    IJ.run("Enhance Contrast", "saturated=0.35")

    # prompt user to select ROI
    original_imp = Duplicator().run(imp)
    _, crop_params = mbui.crop_to_ROI(imp, params)
    imp.show()

    if crop_params is not None:
        params.perform_spatial_crop = True
        mbui.autoset_zoom(imp)
        imp.updateAndDraw()
        review_crop = mb.check_cropping(output_folder_old, params)
        keep_crop = not review_crop
        if review_crop:
            keep_crop = mbui.crop_review()
        if not keep_crop:
            imp.changes = False
            imp.close()
            imp = original_imp
        else:
            original_imp.close()
    else:
        original_imp.close()

    # prompt user to do time cropping
    imp, start_end_tuple = mbui.time_crop(imp, params)
    params.setTimeCropStartEnd(start_end_tuple)

    # import edges
    membrane_edges = mbio.load_qcd_edges2(
        os.path.join(output_folder_old, "user_defined_edges.zip"))
    mbio.save_qcd_edges2(membrane_edges, params.output_path)

    # generate list of Parameters objects to loop over
    params_list, loop_param_name, loop_param_vals = generate_params_list(
        loop_file_path, params)
    dummy_output_path = params.output_path

    for params, loop_param_val in zip(params_list, loop_param_vals):
        params.setOutputPath(
            os.path.join(dummy_output_path,
                         (loop_param_name + " = " + str(loop_param_val))))
        params.close_on_completion = True
        params.setCurvatureLengthUm(
            round(params.curvature_length_um / params.pixel_physical_size) *
            params.pixel_physical_size)
        os.makedirs(params.output_path)
        calculated_objects = CalculatedObjects()
        calculated_objects.membrane_edges = membrane_edges
        if params.time_crop_start_end[0] is not None:
            calculated_objects.timelist = [
                idx * params.frame_interval
                for idx in range(params.time_crop_start_end[0],
                                 params.time_crop_start_end[1] + 1)
            ]
        else:
            calculated_objects.timelist = [
                idx * params.frame_interval for idx in range(imp.getNFrames())
            ]

        split_channels = mbfs.split_image_plus(imp, params)
        membrane_channel_imp = split_channels[0]
        actin_channel_imp = split_channels[1]
        segmentation_channel_imp = None
        if params.photobleaching_correction:
            if os.path.isfile(
                    os.path.join(output_folder_old,
                                 'binary_membrane_stack.tif')):
                segmentation_binary_path = os.path.join(
                    output_folder_old, 'binary_membrane_stack.tif')
                segmentation_channel_imp = IJ.openImage(
                    segmentation_binary_path)
            else:
                segmentation_channel_imp = split_channels[2]
                segmentation_channel_imp = mb.make_and_clean_binary(
                    segmentation_channel_imp, params.threshold_method)
            split_channels[2] = segmentation_channel_imp

        calculated_objects = mbfs.calculate_outputs(params, calculated_objects,
                                                    split_channels)

        # output colormapped images and kymographs
        fig_imp_list = mbfs.generate_and_save_figures(
            imp, calculated_objects, params, membrane_channel_imp,
            segmentation_channel_imp)
        mbfs.save_csvs(calculated_objects, params)

        params.saveParametersToJson(
            os.path.join(params.output_path, "parameters used.json"))
        imp.changes = False
        IJ.setTool("zoom")
        if params.close_on_completion:
            for fig_imp in fig_imp_list:
                fig_imp.close()
    if params.close_on_completion:
        imp.close()

    return
vetor_normal=[0]
aux_vetor_normal=0
aux_flexion_left_knee=0
simetria_comprimento_passo=[0]
t=[0]
data_json = {}
data_json['dados']=[0]
movimento=0                ##Time up: 0, Circles: 1, Straight line: 2, Excessive elevation: 3, Asymmetric: 4 and Foot circumduction: 5
altura_pe_esquerdo=0
altura_pe_direito=0
altura_calcanhar=0.135
ponto_tornozelo_direito=[]
ponto_tornozelo_esquerdo=[]
k=0
slide= 0 #input("Enter a value for the slide: 0, 2 or 4 ")
slide_result=Parameters.slide_gait_cycle(slide)
array_coordenadas=[] ##Array of skeleton coordinates
matrix_coordenadas=[]
aux_movimento=[movimento]
CAPTURA='RGB'

angulo_nathan=[0]

#Video analysis starts here, frame by frame

for it_frames in range(video_loader.n_frames()):
    video_loader.load_next()

    tempo_anterior=time.time()
    aux_tempo=time.time() #update the time instant between frames
Exemplo n.º 45
0
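	# Linearly decay the inertia weight from w_init to w_end; the 9999 assumes a run of 10000 iterations.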
	w = ((w_init - w_end) * (9999 - ite)/9999) + w_end
	return w 

def update_position(particle, low_bound, high_bound):
	for i, value in enumerate(particle.position):
		particle.position[i] = value + particle.velocity[i]
		if particle.position[i] > high_bound:
			particle.position[i] = high_bound - abs(particle.position[i] - high_bound)
			particle.velocity[i] *= -1.0
		elif particle.position[i] < low_bound:
			particle.position[i] = low_bound + abs(particle.position[i] - low_bound)
			particle.velocity[i] *= -1.0
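
# A minimal sketch (not part of the snippet above) of the matching global-best PSO velocity
# update that update_position assumes has already run; pbest, gbest and the clipping to
# [min_vel, max_v] are illustrative assumptions based on the Parameters fields read below.
def update_velocity(particle, pbest, gbest, w, C1, C2, min_vel, max_v):
	import random
	for i in range(len(particle.velocity)):
		r1, r2 = random.random(), random.random()
		new_v = (w * particle.velocity[i]
				+ C1 * r1 * (pbest[i] - particle.position[i])
				+ C2 * r2 * (gbest[i] - particle.position[i]))
		particle.velocity[i] = max(min_vel, min(max_v, new_v))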

if __name__ == "__main__":
	## Configurations ##
	parameters = Parameters()
	dimensions = parameters.dimension
	population = parameters.population
	problem_type = parameters.problem_type
	# Search space bounds
	low_bound, high_bound = parameters.set_bounds(problem_type)
	# Algorithm configurations
	iterations = parameters.iterations
	C1 = parameters.C1
	C2 = parameters.C2
	w = parameters.w
	min_vel = parameters.min_vel
	max_v = parameters.max_v
	topology = parameters.topology
	pso_type = parameters.pso_type
	## End Configurations ##
Exemplo n.º 46
0
# Project: https://github.com/HugoCMU/SolarTree
# Description: Definitions for all parameters, runs/calls all other functions

import serial
import numpy as np
from Sensor import Sensor
from PinMaster import PinMaster
from Parameters import Parameters
import math
import sys

# Serial communication with Arduino
ser = serial.Serial('/dev/ttyACM0',  9600)

# Create Parameters object
params = Parameters()

# Add Navigation Parameters (name, value, description)
params.addParam('DATA_SAMPLE_SIZE', 3, 'Sample size for data (increase to stabilize at cost of speed)')
params.addParam('MAX_ITER', 2, 'Maximum number of Sense-Plan-Act Cycles')
params.addParam('MOVE_PER_TURN', 1, 'How far you want the robot to move each step (increments of 10cm)')
params.addParam('TIMEOUT', 0.1, 'How many seconds until sensor loop times out and returns a bunch of zeros')
params.addParam('FORWARD_VECTOR', [0, 1, 0], 'Describes the forward (Theta = 0) direction in the robot frame')

# Add SLAM Parameters (name, value, description)
params.addParam('RAND_DIST_MU', 0, 'Center of distribution (cm)')
params.addParam('RAND_DIST_SIGMA', 1, 'Standard deviation (cm)')
params.addParam('RAND_ANG_MU', 0, 'Degrees')
params.addParam('RAND_ANG_SIGMA', 10, 'Degrees')
params.addParam('RAND_NUM', 10, 'Number of random samples')
Exemplo n.º 47
0
 def __init__(self):
     Parameters.__init__(self, 167, 3, 128, 61, 20, 18)
Exemplo n.º 48
0
import serial
import numpy as np

from Sensor import Sensor
from PinMaster import PinMaster
from Parameters import Parameters

# ----------------------------------------------------
#           INITIALIZE GLOBAL VARIABLES
# ----------------------------------------------------

# Serial communication with Arduino
ser = serial.Serial('/dev/ttyACM0',  9600)

# Create Parameters object
params = Parameters()

# Add Navigation Parameters (name, value, description)
params.addParam('DATA_SAMPLE_SIZE', 3, 'Sample size for data (increase to stabilize at cost of speed)')
params.addParam('MAX_ITER', 10, 'Maximum number of Sense-Plan-Act Cycles')
params.addParam('MOVEMENT_WEIGHT', 5.0, 'Weight factor for movement vector vs move units to be traveled')
params.addParam('TIMEOUT', 0.1, 'How many seconds until sensor loop times out and returns a bunch of zeros')
params.addParam('FORWARD_VECTOR', [0, 1, 0], 'Describes the forward (Theta = 0) direction in the robot frame')
params.addParam('BACKWARD_VECTOR', [0, -1, 0], 'Describes the backwards (Theta = 0) direction in the robot frame')
params.addParam('WAIT_TIME', 2, 'How many seconds in between exploration steps')

# Add Tuning Parameters
params.addParam('DISTANCE_WEIGHT', [0.1, 0.1, 0.2],
                'Weighting of [X, Y, Theta] each when determining distance metric')

# Add Motor Parameters
Exemplo n.º 49
0
    args = parse_args()
    print(args)
    args_str = get_args_to_string(args)
    args.args_str = args_str
    print(args_str)
    print('Data loading...')
    t1, t_init = time(), time()
    #pdb.set_trace()
    if args.method.lower() in ['sorecgatitem']:
        dataset = SocialItem_Dataset(args)
    elif args.method.lower() in ['sorecgatuser']:
        dataset = SocialUser_Dataset(args)
    else:
        dataset = Dataset(args)

    params = Parameters(args, dataset)
    print(
        """Load data done [%.1f s]. #user:%d, #item:%d, #dom:%d, #train:%d, #test:%d, #valid:%d"""
        % (time() - t1, params.num_users, params.num_items, params.num_doms,
           params.num_train_instances, params.num_test_instances,
           params.num_valid_instances))
    print('Method: %s' % (params.method))
    if params.method in ['sorecgatitem', 'sorecgatuser']:
        model = Models(params)
    model.define_model()
    model.define_loss('all')
    print("Model definition completed: in %.2fs" % (time() - t1))

    train_step = get_optimizer(params.learn_rate,
                               params.optimizer).minimize(model.loss)
    init = tf.global_variables_initializer()
Exemplo n.º 50
0
def main(type_atc, argv):
	date_start = datetime.datetime.now()
	date_start = date_start.strftime("%Y-%m-%d %H:%M:%S")

	parameters = Parameters(type_atc, argv)
	contexts = parameters.getContexts()
	input_folder = parameters.getInputFolder()
	language = parameters.getLanguage()
	min_word_size = int(parameters.getMinWordSize())
	max_qty_terms = int(parameters.getMaxQtyTerms())
	mi_precision = parameters.getMIPrecision()
	output_folder = parameters.getOutputFolder()
	window_size = parameters.getWindowSize()
	temp_folder = parameters.getTempFolder()
	record_log = parameters.getRecordLog()
	record_intermediate = parameters.getRecordIntermediate()
	seeds_file = parameters.getSeedsFile()
	sim_measure = parameters.getSimilarityMeasure()
	del parameters
 
	logfile = LogFile(record_log, str(date_start), None, input_folder, language, None, min_word_size, max_qty_terms, mi_precision, output_folder, window_size, temp_folder, seeds_file, sim_measure)
	stat_corpus = StatisticalCorpus(input_folder, temp_folder, min_word_size, window_size)

	if not contexts:
		logfile.writeLogfile('- Building statistical corpus at '+temp_folder)
	
		if language == 'pt':
			stat_corpus.buildCorpus_pt()	
			param_nsp = '--token ../misc/tokens_nsp.pl'
		elif language == 'en':
			stat_corpus.buildCorpus_en()
			param_nsp = ''

		"""
			Uses count.pl from NGram Statistical Package (NSP) to get Bigrams in a window
		"""

		logfile.writeLogfile('- Getting bigrams to W'+window_size+'_Statistical_corpus.txt')

		command = 'count.pl --ngram 2 '+param_nsp+' --window '+window_size+' '+temp_folder+'W'+window_size+'_Statistical_corpus.txt '+temp_folder+'Statistical_corpus.txt'
		os.system(command)

		logfile.writeLogfile('- Using '+sim_measure+' as similarity measure')

		if sim_measure == 'mutual_information':
			mi = MutualInformation(temp_folder, 'W'+window_size+'_Statistical_corpus.txt', seeds_file, mi_precision)
			dic_terms = mi.getDicMI()
			del mi
		else:
			stat_corpus.buildSTRelations('W'+window_size+'_Statistical_corpus.txt', seeds_file)
			measures = Measures(temp_folder+'W'+window_size+'_Relations.txt', seeds_file)
			dic_terms = measures.getTopNToAllSeeds(sim_measure, max_qty_terms)
			del measures

	else:
		measures = Measures(temp_folder+'W'+window_size+'_Relations.txt', seeds_file)
		dic_terms = measures.getTopNToAllSeeds(sim_measure, max_qty_terms)
		del measures

	del stat_corpus

	logfile.writeLogfile('- Building thesaurus in '+output_folder+'T'+window_size+'_'+type_atc+'_'+sim_measure+'.xml')

	thesaurus = Thesaurus(output_folder+'T'+window_size+'_'+type_atc+'_'+sim_measure+'.xml',max_qty_terms)
	thesaurus.write(dic_terms)
	del thesaurus

	date_end = datetime.datetime.now()
	date_end = date_end.strftime("%Y-%m-%d %H:%M:%S")
	logfile.writeLogfile('- Thesaurus successfully built!\nEnding process at: '+str(date_end)+'.\n')
	del logfile