def __init__(self, parent, value=1):
    self.value = Measure(value, self.unit)
    self.schematicComponent = parent
    self.schematic = parent.schematic
    self.solver = self.schematic.solver
    self.name = self.schematic.getNextName(self.namePrefix)
    self.nodes = [None, None]
def format_notes(self, notes):
    """
    Formats the notes into a list of measures.
    Input  - notes: the note rows to be grouped into measures
    Output - None
    """
    measure_counter, time_counter = 0, 0
    curr_measure = Measure(measure_counter, self.time_signature[0], self.time_signature[1])
    for i in range(len(notes)):
        row = notes.iloc[i]
        note = Note(row.given_pitch, row.signal, row.loudness, row.time,
                    duration=row.duration, typ=row.typ)
        if time_counter + row.duration > 4:
            # FIXME: this fills up the rest of the measure with a rest, but it can be better.
            # Ideally it would be smart enough to wrap up a measure if there's little cutoff,
            # or tie the current note into the next measure.
            curr_measure.wrap_up_time()
            self.addMeasure(curr_measure)
            measure_counter += 1
            time_counter = 0
            curr_measure = Measure(measure_counter, self.time_signature[0], self.time_signature[1])
        curr_measure.addNote(row)
        time_counter += row.duration  # accumulate the beats used so far in this measure
    curr_measure.wrap_up_time()
    self.addMeasure(curr_measure)
def __init__(self, dataset, model_path, valid_or_test, model_name):
    self.model = torch.load(model_path)
    self.model.eval()
    self.model_name = model_name
    self.dataset = dataset
    self.valid_or_test = valid_or_test
    self.measure = Measure()
def on_message(client, userdata, message): logger.debug("in: " + message.topic + "/" + str(message.payload.decode("utf-8"))) # only message should be receiving data pool if message.topic == f"getPool{args.DeviceID}": logger.info(f"recieved pool") # load json data as object dic = json.loads(message.payload) # shuffle data to obscure in-order matching for AG shuffle(dic) logger.debug(dic) for entry in dic: logger.debug(f"treating {entry}") # get payload payload = entry["entry"] # convert to bytes as_bytes = bytes.fromhex(payload) # decipher payload res = decipher.decrypt(as_bytes) # interpret as measure m = Measure("none") m.unpack(res) logger.info(f"publishing measure {m.MUID}") client.publish("measures", str(m))
def __init__(self):
    self.c = Communicate()
    self.d = Detection()
    self.m = Measure()
    self.msg_list = MSG_LIST  # dictionary of prompt messages
    self.cmd_dict = {}        # command dictionary
    self._func_dict()         # populate cmd_dict with the class methods whose names end in '_(\d+)'
def __init__(self, dataset, model_path):
    self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    self.model = torch.load(model_path, map_location=self.device)
    self.model.eval()
    self.dataset = dataset
    self.measure = Measure()
    self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())
def __init__(self, dataset, model, valid_or_test, model_name):
    self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    self.model = model
    self.model.eval()
    self.dataset = dataset
    self.model_name = model_name
    self.valid_or_test = valid_or_test
    self.measure = Measure()
    self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())
def __init__(self, dataset, model_path, valid_or_test):
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.model = torch.load(model_path, map_location=self.device)
    self.model.eval()
    self.dataset = dataset
    self.valid_or_test = valid_or_test
    self.measure = Measure()
    self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())
def __init__(self, dataset, model_path, valid_or_test, model_name, params):
    instance_gen = globals()[model_name]
    # self.model = torch.load(model_path)
    self.model = instance_gen(dataset=dataset, params=params).to("cuda:0")
    self.model.load_state_dict(torch.load(model_path))
    self.model.eval()
    self.dataset = dataset
    self.valid_or_test = valid_or_test
    self.measure = Measure()
def on_message(client, userdata, message): logger.debug("rcvd: " + message.topic + "/" + str(message.payload.decode("utf-8"))) if message.topic == "addToPool": as_bytes = bytes.fromhex(message.payload.decode("utf-8")) res = decipher.decrypt(as_bytes) m = Measure("none") m.unpack(res) logger.info(m)
def readMeasures(self, fileName, timeLimit=None, typeOffset=0):
    with open(fileName, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ')
        typeOfTest = 0
        measure = None
        listOfMeasures = []
        nbRows = 0
        for row in reader:
            if len(row) < 2:
                continue
            if row[0].startswith("StartOfNewTest"):
                # Getting the type of test
                typeOfTest = int(row[1].strip()) + typeOffset
                print("New test")
            elif row[0].strip() == "Time":
                # New measure: add the previous one and create a new one
                if measure is not None:
                    listOfMeasures.append(measure)
                measure = Measure(typeOfTest)
            elif isNumber(row[0].strip()):
                if timeLimit is not None:
                    if float(row[0]) / 10000.0 > timeLimit:
                        # We're done for this measure
                        continue
                # Adding a row of measures: [time, command, position, speed]
                measure.addValues([float(row[0]) / 10000.0,
                                   float(row[1]) * self.rawCommandToVoltage,
                                   float(row[2]) * self.rawPositionToRad,
                                   float(row[3]) * self.rawPositionToRad])
                nbRows = nbRows + 1
            else:
                print("Weird line:", row)
        print("Added", nbRows, "rows and", len(listOfMeasures), "measures")
        return listOfMeasures
def eval_dataset(self, dataset):
    """
    Evaluate the dataset with the given model.
    """
    # Reset normalization parameter
    settings = ["raw", "fil"]
    normalizer = 0
    # Contains the measure values for the given dataset (e.g. test for arity 2)
    current_rank = Measure()
    for i, fact in enumerate(dataset):
        arity = self.dataset.max_arity - (fact == 0).sum()
        for j in range(1, arity + 1):
            normalizer += 1
            queries = self.create_queries(fact, j)
            for raw_or_fil in settings:
                r, e1, e2, e3, e4, e5, e6 = self.add_fact_and_shred(fact, queries, raw_or_fil)
                if self.model_name == "HypE":
                    ms = np.zeros((len(r), 6))
                    bs = np.ones((len(r), 6))
                    ms[:, 0:arity] = 1
                    bs[:, 0:arity] = 0
                    ms = torch.tensor(ms).float().to(self.device)
                    bs = torch.tensor(bs).float().to(self.device)
                    sim_scores = self.model(r, e1, e2, e3, e4, e5, e6, ms, bs).cpu().data.numpy()
                elif self.model_name == "MTransH":
                    ms = np.zeros((len(r), 6))
                    ms[:, 0:arity] = 1
                    ms = torch.tensor(ms).float().to(self.device)
                    sim_scores = self.model(r, e1, e2, e3, e4, e5, e6, ms).cpu().data.numpy()
                else:
                    sim_scores = self.model(r, e1, e2, e3, e4, e5, e6).cpu().data.numpy()

                # Get the rank and update the measures
                rank = self.get_rank(sim_scores)
                current_rank.update(rank, raw_or_fil)
                # self.measure.update(rank, raw_or_fil)

        if i % 1000 == 0:
            print("--- Testing sample {}".format(i))
    return current_rank, normalizer
def __init__(self, config):
    self.config = config
    gps = GPS(config)
    measure = Measure()
    self.mobile = Mobile(gps, measure, config)
    self.step = config['step']
def __init__(self, name):
    result_factory = testresult.SingleStatisticResultFactory()
    measurements = [
        MeasurementCpuTime(result_factory.create_result()),
        MeasurementWallTime(result_factory.create_result()),
        MeasurementVmSize(result_factory.create_result())
    ]
    super(TestCaseWithBasicMeasurements, self).__init__(name, Measure(measurements))
def FindDomain(self):
    """
    Finds the region over which all variables are defined.
    """
    dom = [list(tpl) for tpl in self.Domain]
    for v_idx in range(self.num_vars):
        v = self.Vars[v_idx]
        for f in self.OrthSys:
            Orth = self.OrthSys[f]
            if v in Orth.Vars:
                idx = Orth.Vars.index(v)
                rng = Orth.Domain[idx]
                if (dom[v_idx][0] is None) or (rng[0] < dom[v_idx][0]):
                    dom[v_idx][0] = rng[0]
                if (dom[v_idx][1] is None) or (rng[1] > dom[v_idx][1]):
                    dom[v_idx][1] = rng[1]
    self.Domain = [tuple(lst) for lst in dom]
    # defines the default sampling measure object
    self.SampleMeasure = Measure(self.Domain, 1)
class Tester:
    def __init__(self, dataset, model_path, valid_or_test, model_name, params):
        instance_gen = globals()[model_name]
        # self.model = torch.load(model_path)
        self.model = instance_gen(dataset=dataset, params=params).to("cuda:0")
        self.model.load_state_dict(torch.load(model_path))
        self.model.eval()
        self.dataset = dataset
        self.valid_or_test = valid_or_test
        self.measure = Measure()

    def getRank(self, sim_scores):
        # assuming the test fact is the first one
        return (sim_scores > sim_scores[0]).sum() + 1

    def replaceAndShred(self, fact, raw_or_fil, head_or_tail):
        head, rel, tail, absolute_time, years, months, days = fact
        if head_or_tail == "head":  # predict head
            ret_facts = [(i, rel, tail, absolute_time, years, months, days)
                         for i in range(self.dataset.numEnt())]
        if head_or_tail == "tail":  # predict tail
            ret_facts = [(head, rel, i, absolute_time, years, months, days)
                         for i in range(self.dataset.numEnt())]
        if raw_or_fil == "raw":
            ret_facts = [tuple(fact)] + ret_facts
        elif raw_or_fil == "fil":
            ret_facts = [tuple(fact)] + list(set(ret_facts) - self.dataset.all_facts_as_tuples)
        return shredFacts(np.array(ret_facts))

    def test(self):
        for i, fact in enumerate(self.dataset.data[self.valid_or_test]):
            settings = ["fil"]
            for raw_or_fil in settings:
                for head_or_tail in ["head", "tail"]:
                    heads, rels, tails, absolute_time, years, months, days = self.replaceAndShred(
                        fact, raw_or_fil, head_or_tail)
                    sim_scores = self.model(heads, rels, tails, absolute_time,
                                            years, months, days).cpu().data.numpy()
                    rank = self.getRank(sim_scores)
                    self.measure.update(rank, raw_or_fil)

        self.measure.print_()
        print("~~~~~~~~~~~~~")
        self.measure.normalize(len(self.dataset.data[self.valid_or_test]))
        self.measure.print_()
        return self.measure.mrr["fil"]
class MockTester(Tester):
    def __init__(self):
        self.measure = None

    def test(self):
        torch.manual_seed(0)
        # sim_queries = torch.cat((torch.randint(vocab_size, (query_size, 1)),
        #                          torch.randint(vocab_size, (query_size, 1)),
        #                          torch.randint(vocab_size, (query_size, 1))), 1).int()
        settings = ['raw']
        query_size = 1000
        vocab_size = 1000
        for raw_or_fil in settings:
            for head_or_tail in ["head"]:
                torch.manual_seed(0)
                self.measure = Measure()
                random_scores = torch.rand((query_size, vocab_size))
                print(f"current settings {head_or_tail} + {raw_or_fil}")
                for fact in random_scores:
                    rank = self.getRank(fact)
                    self.measure.update(rank, raw_or_fil)
                self.measure.normalize(query_size)
                self.measure.print_()
def addMeasure(self, vector, length=4):
    # Takes in a vector and generates a measure based on the features of the vector
    self.current_measure += 1
    if self.binaryProbability(vector.getRepetition()) == True:
        if self.current_measure > 1:
            measure_index = random.choice(range(len(self.measures)))
            the_measure = self.measures[measure_index]
            # If we're repeating, the user's opinion should reflect the randomly selected
            # vector's properties, except for the repetition property, which should come
            # from the current vector (called 'vector') that caused us to repeat. So we
            # build a new vector with the selected vector's properties and the current
            # vector's repetition.
            new_vec = the_measure.getVector()
            new_vec.setRepetition(vector.getRepetition())
            the_measure.setVector(new_vec)
            self.measures.append(the_measure)
            # Add the first measure.
            return
    measure = Measure()
    measure.setVector(vector)
    time_left = float(length)
    self.chanceToChangeOctave(vector.getOctaveChance())
    while time_left > 0:
        pitch_list = self.buildNoteRatioList(vector.getGoodNoteRatio())
        if measure == []:
            # First note in the measure: just choose a pitch randomly. <- Could be improved
            pitch = random.choice(pitch_list)
        else:
            # Otherwise, incorporate the variety distance weight.
            pitch_list.extend(self.buildDistanceRatio(measure[-1], vector.getNoteDistance()))
            pitch = random.choice(pitch_list)
        if time_left != 4:
            duration = self.pickDuration(vector.getVarietyDuration(), time_left, measure)
        else:
            # First note: choose a duration randomly
            duration = random.choice([.25, .5, 1, 2, 4])
        note = Note(4 - time_left, 1, pitch, duration, 100)
        measure.addNote(note)
        # Check to see if we are adding a chord
        if self.binaryProbability(vector.getOneVsChord()):
            # We rolled a chord.
            pitches_in_chord = self.chordList(pitch)
            for chord_pitch in pitches_in_chord:
                chord_note = Note(4 - time_left, 1, chord_pitch, duration, 100)
                measure.addNote(chord_note)
        time_left -= duration
    self.measures.append(measure)
def initial_population(options):
    population_count = options['genetic']['population']
    num_measures = options['genetic']['num_measures']
    rest_chance = options['genetic']['rest_chance']
    min_note = options['genetic']['min_note']
    max_note = options['genetic']['max_note']
    random.seed()
    # The population is represented as a list of lists, where each inner list is a
    # list of measures (one gene).
    # TODO: add parameters for random_measure from user options
    return [[
        Measure.random_measure(rest_chance, min_note, max_note)
        for _ in range(num_measures)
    ] for _ in range(population_count)]
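# Illustrative shape of the `options` dict consumed above; the key names come from the
# snippet, the values here are placeholders only.
options = {
    'genetic': {
        'population': 20,    # number of genes in the population
        'num_measures': 8,   # measures per gene
        'rest_chance': 0.1,  # probability that a generated note is a rest
        'min_note': 48,      # lowest allowed MIDI note
        'max_note': 84,      # highest allowed MIDI note
    }
}
population = initial_population(options)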
def __init__(self, variables, var_range, env='sympy'):
    """
    To initiate an orthogonal system of functions, one should provide a list of
    symbolic variables ``variables`` and the range of each of these variables as
    a list of lists ``var_range``.
    """
    assert (type(variables) is list) and (type(var_range) is list), \
        """The OrthSystem class object requires two lists as inputs:
        (1) list of symbolic variables;
        (2) range of each variable."""
    self.EnvAvail = self.DetSymEnv()
    if self.EnvAvail == []:
        raise Exception("No Symbolic tool is available.")
    elif env in self.EnvAvail:
        self.Env = env
    else:
        raise Exception("The selected symbolic tool is not supported.")
    self.Vars = variables
    self.num_vars = len(self.Vars)
    self.Domain = var_range
    self.measure = Measure(self.Domain, 1)
    self.OriginalBasis = []
    self.OrthBase = []
    self.Numerical = False
    self.CommonSymFuncs(self.Env)
def testNoteSorting(self):
    measure = Measure()

    class Note(object):
        def __init__(self, start):
            self.start = start

    noteA = Note(start=5)
    noteB = Note(start=6)
    measure.addNote(noteB)
    measure.addNote(noteA)
    self.assertEqual([5, 6], [note.start for note in measure.orderedNotes()])

    noteC = Note(start=17)
    noteD = Note(start=1)
    measure.addNote(noteD)
    measure.addNote(noteC)
    measure.addNote(Note(start=5))
    self.assertEqual([1, 5, 5, 6, 17], [note.start for note in measure.orderedNotes()])
def make_score(score, parsed_block, bar_to_att):
    measure_len = len(parsed_block[0])
    for measure_num in range(0, measure_len):
        cur_measure = Measure()
        measure_width = len(parsed_block)
        for voice_num in range(0, measure_width):
            cur_measure.add_voice(beats_to_voice(parsed_block[voice_num][measure_num]))
        if measure_num in bar_to_att:
            for attribute in bar_to_att[measure_num]:
                cur_measure.add_attribute(attribute)
        score.add_measure(cur_measure)
    return score
class Tester:
    def __init__(self, dataset, model_path, valid_or_test, model_name):
        self.model = torch.load(model_path)
        self.model.eval()
        self.model_name = model_name
        self.dataset = dataset
        self.valid_or_test = valid_or_test
        self.measure = Measure()

    def getRank(self, sim_scores):
        # assuming the test fact is the first one
        return (sim_scores > sim_scores[0]).sum() + 1

    def replaceAndShred(self, fact, raw_or_fil, head_or_tail):
        head, rel, tail, date = fact
        if head_or_tail == "head":
            ret_facts = [(i, rel, tail, date) for i in range(self.dataset.numEnt())]
        if head_or_tail == "tail":
            ret_facts = [(head, rel, i, date) for i in range(self.dataset.numEnt())]
        if raw_or_fil == "raw":
            ret_facts = [tuple(fact)] + ret_facts
        elif raw_or_fil == "fil":
            ret_facts = [tuple(fact)] + list(set(ret_facts) - self.dataset.all_facts_as_tuples)
        return shredFacts(np.array(ret_facts))

    def test(self):
        for i, fact in enumerate(self.dataset.data[self.valid_or_test]):
            settings = ["fil"]
            for raw_or_fil in settings:
                for head_or_tail in ["head", "tail"]:
                    heads, rels, tails, dates = self.replaceAndShred(fact, raw_or_fil, head_or_tail)
                    sim_scores = self.model(heads, rels, tails, dates).cpu().data.numpy()
                    rank = self.getRank(sim_scores)
                    self.measure.update(rank, raw_or_fil)

        self.measure.print_()
        print("~~~~~~~~~~~~~")
        self.measure.normalize(len(self.dataset.data[self.valid_or_test]))
        self.measure.print_()
        return self.measure.mrr["fil"]
def __init__(self, filename):
    self.measures = []
    # Read measures from a file
    with open(filename, 'r') as songFile:
        currentMeasure = Measure()  # Make a new measure
        for line in songFile:
            # This adds a new octave to a measure
            if line == "\n":
                # Blank lines separate measures
                self.measures.append(currentMeasure)
                currentMeasure = Measure()  # Reset measure, continue
            else:
                currentMeasure.addOctave(line)  # add line to the current measure
        # Append the final, non-newline-terminated measure before closing the file.
        # The file is closed automatically at the end of the `with` block.
        self.measures.append(currentMeasure)
def Run(self):
    print("Analyzing problems")
    with open(self.options.currentProblemFile, 'rt') as problemsFile:
        problemIndex = 0
        for line in problemsFile:
            problemIndex = problemIndex + 1
            print("\n=> Problem #" + str(problemIndex) + " will be analyzed")
            builder = Builder(line, self.options)
            problem = self.LoadProblem(builder)
            problem.SetMeasure(Measure(problem))
            problem.SetBounds(Bounds(problem))
            problem.bounds.CalculateBounds()
            solver = Solver(problem)
            solver.Solve()
            problem.measure.Write()
    print("Finished analyzer run")

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
def mutate(population, percentage):
    result = []
    for gene in population:
        new_gene = []
        for measure in gene:
            new_notes = []
            for note in measure.notes:
                new_note = note
                if random.random() < percentage:
                    # For now we only vary the note number, since changing the note
                    # length would require the measure to be fixed afterwards.
                    new_note.midi_num = math.floor(
                        random.gauss(
                            note.midi_num,
                            measurements.SingleMeasurements.midi_number_stdev(measure)))
                new_notes.append(new_note)
            new_gene.append(Measure(new_notes))
        result.append(new_gene)
    return result
def crossover(population, percentage, granularity):
    mates = random.sample(population, int(len(population) * percentage))
    result = []
    if len(mates) % 2 == 1:
        mates.append(random.choice(population))
    for i in range(0, len(mates), 2):
        crossover_point = Decimal(random.randint(0, 2**granularity))
        crossover_point *= Decimal(1 / (2**granularity))
        first = []
        second = []
        for measure_index in range(len(population[i])):
            first_split = population[i][measure_index].split(crossover_point)
            second_split = population[i + 1][measure_index].split(crossover_point)

            first_measure = Measure(first_split[0] + second_split[1])
            first_measure.fix()
            first.append(first_measure)

            second_measure = Measure(first_split[1] + second_split[0])
            second_measure.fix()
            second.append(second_measure)
        result.append(first)
        result.append(second)
    return result
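# A hedged sketch of how the genetic helpers above could be chained into one generation
# step; the rates and the call order are illustrative, not taken from the original code.
population = initial_population(options)
offspring = crossover(population, percentage=0.5, granularity=3)
population = mutate(population + offspring, percentage=0.05)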
def __init__(self):
    self._max_window = 20
    # set of frames in the sliding window
    self._frames_DB = []
    # set of mappoints in the sliding window; elements are dicts (descriptor -> Mappoints)
    self._mappoints_DB = {}
    self._state = np.array([])
    self._descriptor2state = {}
    self._frameid2state = {}
    self._jacobi = np.array([])
    self._error = np.array([])
    self._measure = Measure()
    self._prior_matrix = np.array([])
    self._prior_matrixb = np.array([])
    self._lastframe = Frame(0)
    self._coefficient = [[], []]
    self._measure_count = 0
    # draw
    self._esti_pose = [[], []]
    self._f2ftrack = []
    self._f2ftrack_show = [[], []]
    self._slideframes = [[], []]
    self._slidepoints = [[], []]
class Tester:
    def __init__(self, dataset, model_path):
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        self.model = torch.load(model_path, map_location=self.device)
        self.model.eval()
        self.dataset = dataset
        self.measure = Measure()
        self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())

    def get_rank(self, sim_scores):
        # assuming the test fact is the first one
        return (sim_scores > sim_scores[0]).sum() + 1.0

    def create_queries(self, fact, head_or_tail):
        head, rel, tail = fact
        if head_or_tail == "head":
            return [(i, rel, tail) for i in range(self.dataset.num_ent)]
        elif head_or_tail == "tail":
            return [(head, rel, i) for i in range(self.dataset.num_ent)]

    def add_fact_and_shred(self, fact, queries, raw_or_fil):
        if raw_or_fil == "raw":
            result = [tuple(fact)] + queries
        elif raw_or_fil == "fil":
            result = [tuple(fact)] + list(set(queries) - self.all_facts_as_set_of_tuples)
        return self.shred_facts(result)

    # def replace_and_shred(self, fact, raw_or_fil, head_or_tail):
    #     ret_facts = []
    #     head, rel, tail = fact
    #     for i in range(self.dataset.num_ent()):
    #         if head_or_tail == "head" and i != head:
    #             ret_facts.append((i, rel, tail))
    #         if head_or_tail == "tail" and i != tail:
    #             ret_facts.append((head, rel, i))
    #     if raw_or_fil == "raw":
    #         ret_facts = [tuple(fact)] + ret_facts
    #     elif raw_or_fil == "fil":
    #         ret_facts = [tuple(fact)] + list(set(ret_facts) - self.all_facts_as_set_of_tuples)
    #     return self.shred_facts(ret_facts)

    def test(self):
        settings = ["raw", "fil"]
        print(len(self.dataset.test_set))
        for i, fact in enumerate(self.dataset.test_set):
            print(i)
            for head_or_tail in ["head", "tail"]:
                queries = self.create_queries(fact, head_or_tail)
                for raw_or_fil in settings:
                    h, r, t = self.add_fact_and_shred(fact, queries, raw_or_fil)
                    sim_scores = self.model.forward(h, r, t).cpu().data.numpy()
                    rank = self.get_rank(sim_scores)
                    self.measure.update(rank, raw_or_fil)

        self.measure.normalize(len(self.dataset.test_set))
        self.measure.print_()
        return self.measure.mrr["fil"]

    def shred_facts(self, triples):
        heads = [triples[i][0] for i in range(len(triples))]
        rels = [triples[i][1] for i in range(len(triples))]
        tails = [triples[i][2] for i in range(len(triples))]
        return (torch.LongTensor(heads).to(self.device),
                torch.LongTensor(rels).to(self.device),
                torch.LongTensor(tails).to(self.device))

    def allFactsAsTuples(self):
        tuples = []
        for fact in self.dataset.test_set:
            tuples.append(tuple(fact))
        for fact in self.dataset.train_set:
            tuples.append(tuple(fact))
        for fact in self.dataset.valid_set:
            tuples.append(tuple(fact))
        print('fil setting successful')
        return tuples
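# Minimal usage sketch for the Tester above; the dataset wrapper and the checkpoint path
# are assumptions for illustration, not part of the original snippet.
dataset = Dataset("data/FB15k-237")           # hypothetical dataset object with train/valid/test sets
tester = Tester(dataset, "models/best.chkpnt")
mrr = tester.test()                           # prints the metrics and returns the filtered MRR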
class OrthSystem(Foundation):
    """
    The ``OrthSystem`` class produces an orthogonal system of functions
    according to a suggested basis of functions and a given measure
    supported on a given region.

    This basically performs a 'Gram-Schmidt' method to extract the
    orthogonal basis. The inner product is obtained by integration of the
    product of functions with respect to the given measure (more
    accurately, the distribution).

    To initiate an instance of this class one should provide a list of
    symbolic variables ``variables`` and the range of each of these
    variables as a list of lists ``var_range``.
    """

    def __init__(self, variables, var_range, env='sympy'):
        """
        To initiate an orthogonal system of functions, one should provide a
        list of symbolic variables ``variables`` and the range of each of
        these variables as a list of lists ``var_range``.
        """
        assert (type(variables) is list) and (type(var_range) is list), \
            """The OrthSystem class object requires two lists as inputs:
            (1) list of symbolic variables;
            (2) range of each variable."""
        self.EnvAvail = self.DetSymEnv()
        if self.EnvAvail == []:
            raise Exception("No Symbolic tool is available.")
        elif env in self.EnvAvail:
            self.Env = env
        else:
            raise Exception("The selected symbolic tool is not supported.")
        self.Vars = variables
        self.num_vars = len(self.Vars)
        self.Domain = var_range
        self.measure = Measure(self.Domain, 1)
        self.OriginalBasis = []
        self.OrthBase = []
        self.Numerical = False
        self.CommonSymFuncs(self.Env)

    def PolyBasis(self, n):
        """
        Generates a polynomial basis from variables consisting of all
        monomials of degree at most ``n``.
        """
        assert n >= 0, "'n' must be a non-negative integer."
        from itertools import product
        B = []
        for o in product(range(n + 1), repeat=self.num_vars):
            if sum(o) <= n:
                T_ = 1
                for idx in range(self.num_vars):
                    T_ *= self.Vars[idx]**o[idx]
                B.append(T_)
        return B

    def FourierBasis(self, n):
        """
        Generates a Fourier basis from variables consisting of all
        :math:`sin` & :math:`cos` functions with coefficients at most ``n``.
        """
        assert n >= 0, "'n' must be a non-negative integer."
        from itertools import product
        B = []
        for o in product(range(n + 1), repeat=self.num_vars):
            if sum(o) <= n:
                SinCos = product(range(2), repeat=self.num_vars)
                for ex in SinCos:
                    T_ = 1
                    for idx in range(self.num_vars):
                        period = self.Domain[idx][1] - self.Domain[idx][0]
                        if o[idx] != 0:
                            if ex[idx] == 0:
                                T_ *= self.cos(2 * self.pi * o[idx] * self.Vars[idx] / period)
                            else:
                                T_ *= self.sin(2 * self.pi * o[idx] * self.Vars[idx] / period)
                    B.append(T_)
        return list(set(B))

    def TensorPrd(self, Bs):
        """
        Takes a list of symbolic bases, each one a list of symbolic
        expressions, and returns their tensor product as a list.
        """
        assert (Bs != []), "Can not compute the tensor product of empty bases."
        from itertools import product
        TP = product(*Bs)
        TBase = []
        for itm in TP:
            t_prd = 1
            for ent in itm:
                t_prd = t_prd * ent
            TBase.append(self.expand(t_prd))
        return TBase

    def SetMeasure(self, M):
        """
        To set the measure with respect to which the orthogonal system will
        be computed, call this method with the corresponding distribution
        ``M`` as its parameter; i.e., the parameter is `d(m)` where `m` is
        the original measure.
        """
        assert isinstance(M, Measure), "The argument must be a `Measure`."
        self.measure = M

    def Basis(self, base_set):
        """
        To specify a particular family of functions as a basis, one should
        call this method with a list ``base_set`` of linearly independent
        functions.
        """
        assert type(base_set) is list, "A list of symbolic functions is expected."
        self.OriginalBasis = base_set
        self.num_base = len(self.OriginalBasis)

    def inner(self, f, g):
        """
        Computes the inner product of the two parameters with respect to
        the measure ``measure``.
        """
        if self.Env == "sympy":
            from sympy import lambdify
            F = lambdify(self.Vars, f * g, "numpy")
        elif self.Env == "sage":
            from sage.all import fast_callable
            h = f * g + self.Vars[0] * 0
            H = fast_callable(h, vars=self.Vars)
            F = lambda *x: H(*x)
        elif self.Env == 'symengine':
            from symengine import Lambdify
            F = lambda *x: Lambdify(self.Vars, [f * g])(x)[0]
        m = self.measure.integral(F)
        return m

    def project(self, f, g):
        """
        Finds the projection of ``f`` on ``g`` with respect to the inner
        product induced by the measure ``measure``.
        """
        return g * self.inner(f, g) / self.inner(g, g)

    def FormBasis(self):
        """
        Call this method to generate the orthogonal basis corresponding to
        the basis given via the ``Basis`` method. The result is stored in a
        property called ``OrthBase``, a list of functions that are
        orthogonal to each other with respect to the measure ``measure``
        over the given range ``Domain``.
        """
        for f in self.OriginalBasis:
            nf = 0
            for u in self.OrthBase:
                nf += self.project(f, u)
            nf = f - nf
            F = self.expand(nf / self.sqrt(self.inner(nf, nf)))
            self.OrthBase.append(F)
        self.num_base = len(self.OrthBase)

    def SetOrthBase(self, base):
        """
        Sets the orthonormal basis to be the given ``base``.
        """
        assert (base != []), "Invalid basis."
        self.OrthBase = base
        self.num_base = len(self.OrthBase)

    def Series(self, f):
        """
        Given a function ``f``, this method finds and returns the
        coefficients of the series that approximates ``f`` as a linear
        combination of the elements of the orthogonal basis.
        """
        cfs = []
        for b in self.OrthBase:
            cfs.append(self.inner(f, b))
        return cfs
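# A small sketch of the OrthSystem workflow above, assuming sympy is available: it
# orthonormalizes the monomials 1, x, x^2 over [-1, 1] against the default uniform
# measure, which spans the same space as the first Legendre polynomials.
from sympy import Symbol

x = Symbol('x')
S = OrthSystem([x], [[-1, 1]])   # default measure is Measure(domain, 1)
S.Basis(S.PolyBasis(2))          # monomials of degree <= 2
S.FormBasis()                    # Gram-Schmidt orthonormalization
print(S.OrthBase)                # orthonormal basis
print(S.Series(x**3))            # coefficients of x^3 in that basis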
#!/usr/bin/python
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
from time import time
from measure import Measure

# trigpin = 4
# echopin = 3
trigpin = 2
echopin = 3

try:
    connection = SerialManager()
    a = ArduinoApi(connection=connection)
    a.pinMode(trigpin, a.OUTPUT)
    a.pinMode(echopin, a.INPUT)
    m = Measure(connection=connection)
    print("Connected to Arduino")
except:
    print("Failed to connect")

while True:
    distance = m.getMeasure(trigpin, echopin)
    if distance > 0:
        print(distance)
    sleep(.25)
def _dotask(self, task):
    '''
    Runs the set of tests prescribed by the task within the time allowed
    according to the tasktimeout parameter.
    '''
    # TODO: interleave the tests: down, up, ping, down, up, ping, etc.
    if not self._isprobe and self._progress is not None:
        made = self._progress.howmany(datetime.fromtimestamp(timestampNtp()).hour)
        if made >= MAX_MEASURES_PER_HOUR:
            self._updatestatus(status.PAUSE)
            return

    bandwidth_sem.acquire()  # Acquire the shared resource: the bandwidth

    logger.info('Inizio task di misura verso il server %s' % task.server)

    # Reserved area for running the measurement
    # --------------------------------------------------------------------------
    # TODO: add the overall task timeout (postponed)
    try:
        self._updatestatus(status.PLAY)

        # Initial system profiling
        # ------------------------
        base_error = 0
        if self._profile_system() != 0:
            base_error = 50000

        # ip = sysmonitor.getIp(task.server.ip, 21)
        dev = sysmonitor.getDev(task.server.ip, 21)
        t = Tester(dev=dev, host=task.server, timeout=self._testtimeout,
                   username=self._client.username, password=self._client.password)

        # TODO: think of another solution for generating the measurement sequence id
        start = datetime.fromtimestamp(timestampNtp())
        id = start.strftime('%y%m%d%H%M')
        m = Measure(id, task.server, self._client, __version__, start.isoformat())

        # Set task timeout alarm
        # signal.alarm(self._tasktimeout)

        # Run the ftp download tests
        # ------------------------
        i = 1
        while i <= task.download:
            self._updatestatus(status.Status(status.PLAY, "Esecuzione Test %d su %d" %
                                             (i, task.download + task.upload + task.ping)))
            try:
                # System profiling
                error = self._profile_system(sysmonitor.CHECK_ALL)

                # Run the test
                logger.info('Starting ftp download test (%s) [%d]' % (task.ftpdownpath, i))
                test = t.testftpdown(task.ftpdownpath)

                # Handle errors reported by the test
                if error > 0 or base_error > 0:
                    test.seterrorcode(error + base_error)

                # Analysis from contabit
                self._test_gating(test, DOWN)

                # Save the test into the measurement
                logger.debug('Download result: %.3f' % test.value)
                logger.debug('Download error: %d, %d, %d' % (base_error, error, test.errorcode))
                m.savetest(test)
                i = i + 1

                # Line prequalification
                if test.value > 0:
                    bandwidth = int(round(test.bytes * 8 / test.value))
                    logger.debug('Banda ipotizzata in download: %d' % bandwidth)
                    task.update_ftpdownpath(bandwidth)

                sleep(1)

            # Catch exceptions raised during the measurement
            except Exception as e:
                if not datetime.fromtimestamp(timestampNtp()).hour == start.hour:
                    raise e
                else:
                    logger.warning('Misura sospesa per eccezione %s' % e)
                    self._updatestatus(status.Status(status.ERROR,
                        'Misura sospesa per errore: %s Aspetto %d secondi prima di proseguire la misura.' % (e, TIME_LAG)))
                    sleep(TIME_LAG)
                    logger.info('Misura in ripresa dopo sospensione. Test download %d di %d' % (i, task.download))
                    self._updatestatus(status.Status(status.PLAY, 'Proseguo la misura. Misura in esecuzione'))

        # Run the ftp upload tests
        i = 1
        while i <= task.upload:
            self._updatestatus(status.Status(status.PLAY, "Esecuzione Test %d su %d" %
                                             (i + task.download, task.download + task.upload + task.ping)))
            try:
                # System profiling
                error = self._profile_system(sysmonitor.CHECK_ALL)

                # Run the test
                logger.debug('Starting ftp upload test (%s) [%d]' % (task.ftpuppath, i))
                test = t.testftpup(self._client.profile.upload * task.multiplier * 1000 / 8,
                                   task.ftpuppath)

                # Handle errors reported by the test
                if error > 0 or base_error > 0:
                    test.seterrorcode(error + base_error)

                # Analysis from contabit
                self._test_gating(test, UP)

                # Save the test into the measurement
                logger.debug('Upload result: %.3f' % test.value)
                logger.debug('Upload error: %d, %d, %d' % (base_error, error, test.errorcode))
                m.savetest(test)
                i = i + 1

                # Line prequalification
                if test.value > 0:
                    bandwidth = int(round(test.bytes * 8 / test.value))
                    logger.debug('Banda ipotizzata in upload: %d' % bandwidth)
                    self._client.profile.upload = bandwidth

                sleep(1)

            # Catch exceptions raised during the measurement
            except Exception as e:
                if not datetime.fromtimestamp(timestampNtp()).hour == start.hour:
                    raise e
                else:
                    logger.warning('Misura sospesa per eccezione %s' % e)
                    self._updatestatus(status.Status(status.ERROR,
                        'Misura sospesa per errore: %s Aspetto %d secondi prima di proseguire la misura.' % (e, TIME_LAG)))
                    sleep(TIME_LAG)
                    logger.info('Misura in ripresa dopo sospensione. Test upload %d di %d' % (i, task.upload))
                    self._updatestatus(status.Status(status.PLAY, 'Proseguo la misura. Misura in esecuzione'))

        # Run the ping tests
        i = 1
        while i <= task.ping:
            self._updatestatus(status.Status(status.PLAY, "Esecuzione Test %d su %d" %
                                             (i + task.download + task.upload,
                                              task.download + task.upload + task.ping)))
            try:
                # System profiling
                error = self._profile_system(sysmonitor.CHECK_MEDIUM)

                # Run the test
                logger.debug('Starting ping test [%d]' % i)
                test = t.testping()

                # Handle errors reported by the test
                if error > 0 or base_error > 0:
                    test.seterrorcode(error + base_error)

                # Save the test into the measurement
                logger.debug('Ping result: %.3f' % test.value)
                logger.debug('Ping error: %d, %d, %d' % (base_error, error, test.errorcode))
                m.savetest(test)
                i = i + 1

                if (i - 1) % task.nicmp == 0:
                    sleep(task.delay)

            # Catch exceptions raised during the measurement
            except Exception as e:
                if not datetime.fromtimestamp(timestampNtp()).hour == start.hour:
                    raise e
                else:
                    logger.warning('Misura sospesa per eccezione %s' % e)
                    self._updatestatus(status.Status(status.ERROR,
                        'Misura sospesa per errore: %s Aspetto 10 secondi prima di proseguire la misura.' % e))
                    sleep(10)
                    logger.info('Misura in ripresa dopo sospensione. Test ping %d di %d' % (i, task.ping))
                    self._updatestatus(status.Status(status.PLAY, 'Proseguo la misura. Misura in esecuzione'))

        # Unset task timeout alarm
        # signal.alarm(0)

        # Send the file to the measurement repository
        sec = datetime.fromtimestamp(timestampNtp()).strftime('%S')
        f = open('%s/measure_%s%s.xml' % (self._outbox, m.id, sec), 'w')
        f.write(str(m))
        # Append the end date at the bottom of the file
        f.write('\n<!-- [finished] %s -->' % datetime.fromtimestamp(timestampNtp()).isoformat())
        f.close()

        if not self._local:
            upload = self._upload(f.name)
            if upload:
                self._updatestatus(status.Status(status.OK, 'Misura terminata con successo.'))
            else:
                self._updatestatus(status.Status(status.ERROR,
                    'Misura terminata ma un errore si è verificato durante il suo invio.'))
        else:
            self._updatestatus(status.Status(status.OK, 'Misura terminata.'))

        logger.info('Fine task di misura.')

    except RuntimeWarning:
        self._updatestatus(status.Status(status.ERROR, 'Misura interrotta per timeout.'))
        logger.warning('Timeout during task execution. Time elapsed > %1f seconds ' % self._tasktimeout)

    except Exception as e:
        logger.error('Task interrotto per eccezione durante l\'esecuzione di un test: %s' % e)
        self._updatestatus(status.Status(status.ERROR,
            'Misura interrotta. %s Attendo %d secondi' % (e, self._polling)))

    bandwidth_sem.release()  # Release the shared resource: the bandwidth
def font_obj(self, font_size):
    measure = Measure(pix_per_mm=self.pix_per_mm)
    measure.point = font_size
    return measure
class Collocation(Foundation):
    """
    The ``Collocation`` class tries to approximate the solutions of a system
    of partial differential equations with respect to an orthogonal system
    of functions.

    To initiate an instance of this class one needs to provide two sets of
    parameters:
        1) List of independent symbolic variables ``variables``;
        2) List of unknown functions to be found that depend on the
           independent variables ``ufunc``.
    """

    def __init__(self, variables, ufunc, env='sympy'):
        Env = self.DetSymEnv()
        if Env == []:
            raise Exception("No Symbolic tool is available.")
        if env not in Env:
            raise Exception("The selected symbolic tool is not available.")
        self.Env = env  # The selected tool for symbolic computations
        self.CommonSymFuncs(self.Env)
        self.Vars = variables  # Symbolic variables
        self.num_vars = len(variables)  # Number of symbolic variables
        self.uFuncs = ufunc  # Unknown functions
        self.num_funcs = len(ufunc)  # Number of unknown functions
        # Number of elements in the orthogonal basis
        self.degree = [1 for _ in ufunc]
        self.EQs = []  # Lists of functional equations
        self.Cnds = []  # Storage for initial and boundary conditions
        self.CndVals = []  # Storage for the values of the conditions
        self.Coeffs = {}
        self.Apprx = {}  # Approximate solutions to uFuncs
        self.Points = []  # Collocation points
        self.OrthSys = {}  # Orthogonal systems of functions corresponding to uFuncs
        self.Solver = 'scipy'  # The solver to find the roots
        self.SolverOption = 'lm'  # Specifies scipy solver
        # Reserved for the domain of variables
        self.Domain = [(None, None) for v in self.Vars]
        self.SampleMeasure = None  # Reserved for the sampling measure
        self.Verbose = False  # Set True to see some messages about the procedure
        # Determines the final status of the solver: `True` for success and
        # `False` for fail
        self.Success = None
        self.init_guess_bnd = 0.1
        # The initial point for the solver. Its dimension must be equal to the
        # number of unknown coefficients
        self.InitPoint = []
        self.CfSyms = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                       'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

    def SetOrthSys(self, obj, func):
        """
        To approximate the solutions of the system of pdes, the class
        requires an orthogonal system of functions ``OrthSystem``. This
        method accepts such a system.
        """
        assert isinstance(obj, OrthSystem), "An object of type `OrthSystem` is expected."
        assert func in self.uFuncs, "`func` must be a function symbol given at initiation."
        idx = self.uFuncs.index(func)
        self.OrthSys[idx] = obj
        self.degree[idx] = obj.num_base

    def FindDomain(self):
        """
        Finds the region over which all variables are defined.
        """
        dom = [list(tpl) for tpl in self.Domain]
        for v_idx in range(self.num_vars):
            v = self.Vars[v_idx]
            for f in self.OrthSys:
                Orth = self.OrthSys[f]
                if v in Orth.Vars:
                    idx = Orth.Vars.index(v)
                    rng = Orth.Domain[idx]
                    if (dom[v_idx][0] is None) or (rng[0] < dom[v_idx][0]):
                        dom[v_idx][0] = rng[0]
                    if (dom[v_idx][1] is None) or (rng[1] > dom[v_idx][1]):
                        dom[v_idx][1] = rng[1]
        self.Domain = [tuple(lst) for lst in dom]
        # defines the default sampling measure object
        self.SampleMeasure = Measure(self.Domain, 1)

    def setSampleMeasure(self, meas):
        """
        Sets the measure over the domain for sampling collocation points in
        case of necessity.
        """
        assert isinstance(meas, Measure), "The input must be an instance of a `Measure` object."
        self.SampleMeasure = meas

    def Equation(self, eq):
        """
        To enter the system of equations, use this method with a list of
        equations as input.
        """
        if type(eq) is list:
            self.EQs += eq
        else:
            self.EQs.append(eq)

    def Condition(self, eq, val):
        """
        Adds an initial or boundary condition to the list of conditions.
        """
        # if eq not in self.Cnds:
        self.Cnds.append(eq)
        self.CndVals.append(val)

    def setSolver(self, solver, optn='lm'):
        """
        Currently only two solvers are supported:

        1. `sage`'s default solver, for rather simple systems of algebraic
           equations;
        2. `scipy`'s `fsolve`, to handle more complex and larger systems.

        It also supports the following solvers from scipy:
            + `hybr`
            + `lm` (default)
            + `broyden1`
            + `broyden2`
            + `anderson`
            + `krylov`
            + `df-sane`
        """
        self.Solver = solver.lower()
        self.SolverOption = optn

    def CollPoints(self, pnts):
        """
        Accepts a list of collocation points ``pnts``, used to form the
        algebraic system of equations and find the coefficients of the
        orthogonal functions from ``OrthSystem.OrthBase``. Each point must
        be either a list or a tuple.
        """
        self.Points += pnts

    def collocate(self):
        """
        Internal use: generates the system of equations for coefficients to
        be used via collocation points.
        """
        """if self.Env == 'sympy':
            from sympy import Symbol as var
            from sympy import Subs, expand, diff
        elif self.Env == 'sage':
            from sage.all import var, expand, diff
        elif self.Env == 'symengine':
            from symengine import Symbol as var
            from symengine import expand, diff"""
        # symbols for coefficients
        var_syms = self.CfSyms
        # Produce enough symbolic variables
        self.CF = {var_syms[s]: [self.Symbol('%s%d' % (var_syms[s], i))
                                 for i in range(self.degree[s])]
                   for s in range(self.num_funcs)}
        self.SymCF = []
        for s in self.CF:
            self.SymCF += self.CF[s]
        self.SR = {}
        self.REq = []
        # loop over unknown functions to be found
        for f_idx in range(self.num_funcs):
            s = var_syms[f_idx]
            T = 0
            # loop over elements of the orthogonal basis
            for i in range(self.degree[f_idx]):
                T += self.CF[s][i] * (self.OrthSys[f_idx].OrthBase[i])
            self.SR[s] = T
        # loop over entered equations
        EQ_num = 0
        for eq in self.EQs:
            f_idx = 0
            Teq = eq
            for f in self.uFuncs:
                for v in self.Vars:
                    if self.Env == 'sage':
                        for d_ord in range(1, 6):
                            Teq = Teq.subs({self.diff(f, v, d_ord): self.diff(
                                self.SR[var_syms[f_idx]], v, d_ord)})
                    if self.Env == 'sympy':
                        Teq = (Teq.subs({f: self.SR[var_syms[f_idx]]})).doit()
                    elif self.Env == 'symengine':
                        Teq = (Teq.msubs({f: self.SR[var_syms[f_idx]]}))
                        Teq = (Teq.msubs({self.diff(f, v):
                                          self.diff(self.SR[var_syms[f_idx]], v)}))
                        Teq = (Teq.msubs({self.diff(self.diff(f, v), v): self.diff(
                            self.diff(self.SR[var_syms[f_idx]], v), v)}))
                f_idx += 1
            if Teq not in self.REq:
                self.REq.append(Teq)
            EQ_num += 1
            if self.Verbose:
                print("Equation # %d generated." % EQ_num)
        # loop over initial and boundary conditions
        cnd_idx = 0
        for eq in self.Cnds:
            f_idx = 0
            Teq = eq
            for f in self.uFuncs:
                for v_idx in range(self.num_vars):
                    v = self.Vars[v_idx]
                    Teq = Teq.subs({self.diff(f, v): self.diff(self.SR[var_syms[f_idx]], v)})
                if self.Env == 'sympy':
                    Teq = (Teq.subs({f: self.SR[var_syms[f_idx]]})).doit()
                elif self.Env == 'symengine':
                    Teq = self.expand(Teq.msubs({f: self.SR[var_syms[f_idx]]}))
                else:
                    Teq = self.expand(Teq.subs({f: self.SR[var_syms[f_idx]]}))
                f_idx += 1
            # Teq = self.expand(Teq.subs({self.Vars[v]: self.CndVals[cnd_idx][v]
            #                             for v in range(len(self.CndVals[cnd_idx]))}))
            # needs more work (index of variables could be off)
            Teq = Teq.subs({self.Vars[v]: self.CndVals[cnd_idx][v]
                            for v in range(len(self.CndVals[cnd_idx]))})
            if Teq not in self.REq:
                self.REq.append(Teq)
            cnd_idx += 1
            if self.Verbose:
                print("Condition # %d added." % cnd_idx)

    def PlugPoints(self):
        """
        Internal use: plugs in the collocation points to eliminate the
        independent variables and keep the coefficients.
        """
        # Plug in the collocation points to form the algebraic equations
        numeric_eqs = []
        for p in self.Points:
            chg = {self.Vars[i]: p[i] for i in range(self.num_vars)}
            for eq in self.REq:
                tp1 = type(eq)
                Teq = eq.subs(chg)
                tp2 = type(Teq)
                if (tp1 == tp2) and (Teq not in numeric_eqs):
                    numeric_eqs.append(Teq)
                if len(numeric_eqs) >= len(self.SymCF):
                    break
            if len(numeric_eqs) >= len(self.SymCF):
                break

        if len(numeric_eqs) != len(self.SymCF):
            raise Exception("Number of points and equations are not equal! Check the conditions.")
        if self.Verbose:
            print("Solving the system of equations numerically to extract coefficients ...")
        # Solve the algebraic equations
        if self.Solver == 'sage':
            if self.Env != 'sage':
                raise Exception("Sage solver is not available in selected symbolic environment.")
            sols = solve(numeric_eqs, self.SymCF, solution_dict=True)
            sols = sols[0]
            return sols
        elif self.Solver in ['scipy']:
            from scipy import optimize as opt
            from random import uniform
            if self.Env == 'sympy':
                from sympy import lambdify
                f_ = [lambdify(self.SymCF, (eq.lhs - eq.rhs), "numpy") for eq in numeric_eqs]

                def f(x):
                    z = tuple(float(x.item(i)) for i in range(len(self.SymCF)))
                    return [fn(*z) for fn in f_]
            elif self.Env == 'symengine':
                from symengine import sympify
                from sympy import lambdify
                t_eqs = [sympify(eq) for eq in numeric_eqs]
                f_ = [lambdify(self.SymCF, eq, "numpy") for eq in t_eqs]

                def f(x):
                    z = tuple(float(x.item(i)) for i in range(len(self.SymCF)))
                    return [fn(*z) for fn in f_]
            elif self.Env == 'sage':
                def f(x):
                    chng = {}
                    U = self.SymCF
                    n_var = len(U)
                    chng = {U[i]: float(x.item(i)) for i in range(n_var)}
                    EQs_ = []
                    for eq in numeric_eqs:
                        teq = eq.lhs() - eq.rhs()
                        EQs_.append(teq.subs(chng).n())
                    return EQs_
            nvars = len(self.SymCF)
            if self.Solver == 'scipy':
                if self.InitPoint != []:
                    init_point = tuple(self.InitPoint)
                else:
                    init_point = tuple(uniform(-self.init_guess_bnd, self.init_guess_bnd)
                                       for _ in range(nvars))
                sol = opt.root(f, init_point, method=self.SolverOption)
                if sol.success:
                    sols = {self.SymCF[i]: list(sol.x)[i] for i in range(nvars)}
                    self.Success = True
                else:
                    sols = {}
                    self.Success = False
                if self.Verbose:
                    print(sol.message)
            return sols

    def Solve(self):
        """
        Solves the collocation equations, keeps a dictionary of coefficients
        in ``self.Coeffs`` and returns a list of functions in the span of
        the orthogonal system.
        """
        if self.Verbose:
            print("Check for collocation points shortage...")
        if self.SampleMeasure is None:
            self.FindDomain()
        num = max(self.degree) - len(self.Points)
        # Check for too many points
        if num < 0:
            raise Exception("Too many points are associated. Reduce at least %d" % (-num))
        cl_points = []
        # Add enough random points to match up for variables
        if num > 0:
            if self.Verbose:
                print("Generating %d new collocation point ..." % num)
            cl_points = self.SampleMeasure.sample(num)
        # attaching points
        self.CollPoints(cl_points)
        if self.Verbose:
            print("Attaching %d collocation points:" % len(self.Points))
            for p in self.Points:
                print(p)
        if self.Verbose:
            print("Generating algebraic equations based on given orthogonal systems of functions ...")
        self.collocate()
        if self.Verbose:
            print("Plug collocation points to extract system of equations ...")
        self.Coeffs = self.PlugPoints()
        if self.Verbose:
            print("Done!")
        if self.Coeffs != {}:
            for fn in self.uFuncs:
                s = self.CfSyms[self.uFuncs.index(fn)]
                self.Apprx[fn] = self.SR[s].subs(self.Coeffs)
        return self.Apprx
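# A rough end-to-end sketch of the Collocation class above, assuming sympy is available:
# approximate u'(x) = u(x) with u(0) = 1 on [0, 1] using a degree-3 polynomial basis.
# The exact form of the unknown function symbol (Function('u')(x)) is an assumption
# consistent with the substitutions performed in `collocate`.
from sympy import Symbol, Function, Eq, diff

x = Symbol('x')
u = Function('u')(x)

orth = OrthSystem([x], [[0, 1]])
orth.Basis(orth.PolyBasis(3))
orth.FormBasis()

col = Collocation([x], [u])
col.SetOrthSys(orth, u)
col.Equation([Eq(diff(u, x), u)])   # the ODE u' = u
col.Condition(Eq(u, 1), [0])        # boundary value u(0) = 1
approx = col.Solve()                # {u: polynomial approximation of exp(x)}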
def __init__(self, domain):
    assert domain.tdim == 1, \
        'Invalid domain tdim(%d) != 1' % domain.tdim
    Measure.__init__(self, domain)
# -*- coding: utf-8 -*-
# Script for course 1
import time
from whill import ComWHILL
from command_forward import CommandForward
from command_turn_left import CommandTurnLeft
from command_turn_right import CommandTurnRight
from command_stop import CommandStop
from command_http import CommandHttp
from measure import Measure

whill = ComWHILL(port='/dev/tty.usbserial-FT2K21HW')
request_speed_mode = 0
measure = Measure()
commands = [
    CommandForward(whill, measure, 1.2),
    CommandTurnRight(whill, measure, 45.0),
    CommandHttp("http://192.168.21.214:3000/change1"),
    CommandStop(whill, measure, 5000),
    CommandTurnLeft(whill, measure, 90.0),
    CommandForward(whill, measure, 1.0),
    CommandTurnRight(whill, measure, 45.0),
    CommandHttp("http://192.168.21.214:3000/change2"),
    CommandStop(whill, measure, 5000),
    CommandTurnLeft(whill, measure, 135.0),
    CommandForward(whill, measure, 1.0),
    CommandTurnRight(whill, measure, 45.0),
    CommandHttp("http://192.168.21.214:3000/change3"),
def __init__(self, domain):
    assert domain.gdim == domain.tdim + 1, \
        'Invalid domain tdim(%d) + 1 != gdim(%d)' % (domain.tdim, domain.gdim)
    Measure.__init__(self, domain)