def format_notes(self, notes):
    """Split a table of notes into Measure objects.

    Input  - notes: DataFrame-like rows with fields given_pitch, signal,
             loudness, time, duration, typ
    Output - None (completed measures are handed to self.addMeasure)
    """
    measure_counter, time_counter = 0, 0
    curr_measure = Measure(measure_counter, self.time_signature[0],
                           self.time_signature[1])
    for i in range(len(notes)):
        row = notes.iloc[i]
        note = Note(row.given_pitch, row.signal, row.loudness, row.time,
                    duration=row.duration, typ=row.typ)
        # NOTE(review): 4 assumes a four-beat bar; ideally this would be
        # derived from self.time_signature — confirm before changing.
        if time_counter + row.duration > 4:
            # FIXME: this fills up the rest of the measure with a rest, but it
            # can be better.  Ideally it would be smart enough to wrap up a
            # measure if there's little cutoff or tie the current note into
            # the next measure.
            curr_measure.wrap_up_time()
            self.addMeasure(curr_measure)
            measure_counter += 1
            time_counter = 0
            curr_measure = Measure(measure_counter, self.time_signature[0],
                                   self.time_signature[1])
        # Bug fix: append the constructed Note (previously the raw row was
        # appended and `note` was unused).
        curr_measure.addNote(note)
        # Bug fix: accumulate elapsed time; it was never incremented, so the
        # overflow check above could only ever fire on a single long note.
        time_counter += row.duration
    # Flush the final, partially-filled measure.
    curr_measure.wrap_up_time()
    self.addMeasure(curr_measure)
def __init__(self, filename):
    """Load a track's measures from `filename`.

    Blank lines in the file delimit measures; every other line is one
    octave row belonging to the measure currently being built.
    """
    self.measures = []
    with open(filename, 'r') as song_file:
        measure = Measure()
        for line in song_file:
            if line != "\n":
                measure.addOctave(line)
            else:
                # Blank separator: the measure is complete.
                self.measures.append(measure)
                measure = Measure()
        # The file need not end with a blank line, so flush the last
        # measure here (the `with` block closes the file for us).
        self.measures.append(measure)
def __init__(self, dataset, model_path, valid_or_test, model_name):
    """Deserialize a trained model and prepare it for evaluation."""
    self.dataset = dataset
    self.model_name = model_name
    self.valid_or_test = valid_or_test
    self.model = torch.load(model_path)
    self.model.eval()  # inference mode: no dropout / batch-norm updates
    self.measure = Measure()  # metric accumulator
def on_message(client, userdata, message):
    """MQTT callback: decrypt every entry of a received data pool and
    republish each one as a Measure on the "measures" topic."""
    logger.debug("in: " + message.topic + "/" + str(message.payload.decode("utf-8")))
    # only message should be receiving data pool
    if message.topic != f"getPool{args.DeviceID}":
        return
    logger.info(f"recieved pool")
    # load json data as object
    dic = json.loads(message.payload)
    # shuffle data to obscure in-order matching for AG
    shuffle(dic)
    logger.debug(dic)
    for entry in dic:
        logger.debug(f"treating {entry}")
        # hex payload -> raw bytes -> decrypted measure blob
        as_bytes = bytes.fromhex(entry["entry"])
        res = decipher.decrypt(as_bytes)
        m = Measure("none")
        m.unpack(res)
        logger.info(f"publishing measure {m.MUID}")
        client.publish("measures", str(m))
def __init__(self, parent, value = 1):
    """Attach this component to `parent`'s schematic with default value 1."""
    self.schematicComponent = parent
    self.schematic = parent.schematic
    self.solver = self.schematic.solver
    # self.unit / self.namePrefix are expected from the concrete subclass.
    self.value = Measure(value, self.unit)
    self.name = self.schematic.getNextName(self.namePrefix)
    self.nodes = [None, None]  # two terminals, unwired until connected
def readMeasures(self, fileName, timeLimit=None, typeOffset=0):
    """Parse a space-delimited measure log into Measure objects.

    Row formats:
      "StartOfNewTest <type>"   -> start a new test type (typeOffset added)
      "Time ..."                -> header: archive current measure, start new
      "<t> <cmd> <pos> <spd>"   -> one sample, converted from raw units
    Samples later than `timeLimit` seconds are skipped when a limit is given.
    Returns the list of completed measures.

    Ported from Python 2: print statements -> print(); csv now reads the
    file in text mode (newline='') as required by Python 3.
    """
    with open(fileName, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ')
        typeOfTest = 0
        measure = None
        listOfMeasures = []
        nbRows = 0
        for row in reader:
            if len(row) < 2:
                continue
            if row[0].startswith("StartOfNewTest"):
                # Getting the type of test
                typeOfTest = int(row[1].strip()) + typeOffset
                print("New test")
            elif row[0].strip() == "Time":
                # New measure: adding the previous one and creating a new one.
                # NOTE(review): the final measure is never appended before
                # returning — confirm whether that is intentional.
                if measure is not None:
                    listOfMeasures.append(measure)
                measure = Measure(typeOfTest)
            elif isNumber(row[0].strip()):
                if timeLimit is not None and float(row[0]) / 10000.0 > timeLimit:
                    # We're done for this measure
                    continue
                # Adding a row of measures: [time, command, position, speed]
                measure.addValues([float(row[0]) / 10000.0,
                                   float(row[1]) * self.rawCommandToVoltage,
                                   float(row[2]) * self.rawPositionToRad,
                                   float(row[3]) * self.rawPositionToRad])
                nbRows = nbRows + 1
            else:
                print("Weird line : ", row)
    print("Added ", nbRows, " rows and ", len(listOfMeasures), " measures")
    return listOfMeasures
def __init__(self):
    """Wire up the communication, detection and measurement helpers and
    build the command dispatch table."""
    self.c = Communicate()
    self.d = Detection()
    self.m = Measure()
    self.msg_list = MSG_LIST  # dictionary of user-facing prompt messages
    self.cmd_dict = {}        # command dictionary
    # Populate cmd_dict: registers every method whose name ends with
    # the '_(\d+)' suffix pattern.
    self._func_dict()
def __init__(self, dataset, model_path):
    """Load the model onto GPU when available (else CPU) for evaluation."""
    cuda_ok = torch.cuda.is_available()
    self.device = torch.device('cuda:0' if cuda_ok else 'cpu')
    self.model = torch.load(model_path, map_location=self.device)
    self.model.eval()
    self.dataset = dataset
    self.measure = Measure()
    # All known facts as hashable tuples, for filtered-ranking lookups.
    self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())
def __init__(self, dataset, model_path, valid_or_test, model_name, params):
    """Rebuild the model class named `model_name` and load trained weights.

    The class is resolved from globals() by name, instantiated with the
    dataset/params, and its state dict restored from `model_path`.
    """
    instance_gen = globals()[model_name]
    # Consistency fix: the sibling testers detect CUDA and fall back to
    # CPU; this one hard-coded "cuda:0" and crashed on CPU-only machines.
    self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    self.model = instance_gen(dataset=dataset, params=params).to(self.device)
    self.model.load_state_dict(torch.load(model_path, map_location=self.device))
    self.model.eval()
    self.dataset = dataset
    self.valid_or_test = valid_or_test
    self.measure = Measure()
def __init__(self, dataset, model_path, valid_or_test):
    """Prepare a deserialized model and the known-fact set for ranking."""
    use_gpu = torch.cuda.is_available()
    self.device = torch.device("cuda:0" if use_gpu else "cpu")
    self.model = torch.load(model_path, map_location=self.device)
    self.model.eval()  # inference mode
    self.valid_or_test = valid_or_test
    self.dataset = dataset
    self.measure = Measure()
    self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())
def __init__(self, name):
    """Create a test case that records CPU time, wall time and VM size."""
    factory = testresult.SingleStatisticResultFactory()
    # One single-statistic result per measurement kind.
    measurements = [cls(factory.create_result())
                    for cls in (MeasurementCpuTime,
                                MeasurementWallTime,
                                MeasurementVmSize)]
    super(TestCaseWithBasicMeasurements, self).__init__(
        name, Measure(measurements))
def __init__(self, dataset, model, valid_or_test, model_name):
    """Hold an already-constructed model plus evaluation bookkeeping."""
    gpu_available = torch.cuda.is_available()
    self.device = torch.device('cuda:0' if gpu_available else 'cpu')
    self.model = model
    self.model.eval()
    self.model_name = model_name
    self.dataset = dataset
    self.valid_or_test = valid_or_test
    self.measure = Measure()
    self.all_facts_as_set_of_tuples = set(self.allFactsAsTuples())
def __init__(self, config):
    """Assemble the GPS-driven mobile unit described by `config`."""
    self.config = config
    self.step = config['step']
    # The mobile owns its own GPS and Measure instances.
    self.mobile = Mobile(GPS(config), Measure(), config)
def on_message(client, userdata, message):
    """MQTT callback: decrypt a single pooled measure and log it."""
    logger.debug("rcvd: " + message.topic + "/" + str(message.payload.decode("utf-8")))
    if message.topic != "addToPool":
        return
    # hex payload -> raw bytes -> decrypted measure blob
    raw = decipher.decrypt(bytes.fromhex(message.payload.decode("utf-8")))
    m = Measure("none")
    m.unpack(raw)
    logger.info(m)
def make_score(score, parsed_block, bar_to_att):
    """Append the measures encoded in `parsed_block` to `score`.

    parsed_block is indexed [voice][measure]; bar_to_att maps a measure
    index to the list of attributes attached to that bar.  Returns the
    (mutated) score for convenience.
    """
    total_measures = len(parsed_block[0])
    for measure_num in range(total_measures):
        measure = Measure()
        # One voice line per parsed voice for this bar.
        for voice in parsed_block:
            measure.add_voice(beats_to_voice(voice[measure_num]))
        # Attach any bar-level attributes (absent index -> nothing to add).
        for attribute in bar_to_att.get(measure_num, ()):
            measure.add_attribute(attribute)
        score.add_measure(measure)
    return score
def main(raw_requests, requests_file, responses_file):
    """Load performance-test artifacts, compute metrics, print a report.

    raw_requests / requests_file / responses_file are paths handed to the
    helper loader `h`; all statistics are computed by the Measure object
    over the resulting DataFrame.
    """
    # Initial Configuration
    result_df = h.load_input_files(raw_requests, requests_file, responses_file)
    color = 'cyan'
    measure = Measure(result_df)
    f = Figlet(font='slant')
    print(f.renderText('Performance Test'))
    # Aggregate traffic and timing statistics.
    bytes_received = measure.sum_bytes_count('Raw Response')
    bytes_sended = measure.sum_bytes_count('Serialized Request')
    test_duration = measure.calculate_test_duration()
    total_sending_time = measure.calculate_sending_time()
    total_receiving_time = measure.calculate_receiving_time()
    result_df['Latencies'] = measure.calculate_latency()
    throughput = measure.calculate_throughput()
    rate = measure.calculate_rate()
    success_rate = measure.calculate_success_rate()
    # Report: input files, then one line per metric.
    print("""Loaded files: \r\n"""
          f"""\t»» Prepared Requests: {colored(raw_requests, 'green')}\r\n"""
          f"""\t»» Sent Requests: {colored(requests_file, 'green')}\r\n"""
          f"""\t»» Responses: {colored(responses_file, 'green')}\r\n"""
          """\n""")
    print('{} {}s'.format(h.format_result_index('Elapsed sending time'),
                          colored(total_sending_time, color)))
    print('{} {}s'.format(h.format_result_index('Elapsed receiving time'),
                          colored(total_receiving_time, color)))
    print('{} {}s'.format(h.format_result_index('Test duration'),
                          colored(test_duration, color)))
    print('{} {} MB'.format(h.format_result_index('Bytes sended'),
                            colored(round(bytes_sended, 3), color)))
    print('{} {} MB'.format(h.format_result_index('Bytes received'),
                            colored(round(bytes_received, 3), color)))
    print("""{} """
          """{}s, {}s, {}s""".format(
              h.format_result_index('Latencies [min, max, mean]'),
              colored(result_df['Latencies'].min(), color),
              colored(result_df['Latencies'].max(), color),
              colored(round(result_df['Latencies'].mean(), 3), color)))
    print("""{} """
          """{}, {}R/s, {} MB/s""".format(
              h.format_result_index('Requests [count, rate, throughput]'),
              colored(measure.requests_count, color),
              colored(round(rate, 3), color),
              colored(round(throughput, 3), color)))
    print('{} {}% \n'.format(h.format_result_index('Success rate'),
                             colored(success_rate, color)))
def crossover(population, percentage, granularity):
    """Single-point crossover over a random subset of the population.

    A fraction `percentage` of genes is sampled, padded to an even count,
    and paired off.  Each pair is split at a random point in [0, 1]
    quantized to 2**granularity steps; measure halves are swapped and
    fixed, producing two offspring per pair.  Returns the offspring list.
    """
    mates = random.sample(population, int(len(population) * percentage))
    result = []
    if len(mates) % 2 == 1:
        # Need an even number of parents; draw one extra at random.
        mates.append(random.choice(population))
    for i in range(0, len(mates), 2):
        # Dividing *inside* Decimal keeps full precision — the original
        # Decimal(1 / 2**granularity) rounded through a binary float first.
        crossover_point = Decimal(random.randint(0, 2**granularity)) / (2**granularity)
        # Bug fix: cross the sampled parents.  The original indexed
        # population[i] / population[i + 1], ignoring `mates` entirely.
        parent_a, parent_b = mates[i], mates[i + 1]
        first = []
        second = []
        for measure_index in range(len(parent_a)):
            split_a = parent_a[measure_index].split(crossover_point)
            split_b = parent_b[measure_index].split(crossover_point)
            child = Measure(split_a[0] + split_b[1])
            child.fix()
            first.append(child)
            child = Measure(split_a[1] + split_b[0])
            child.fix()
            second.append(child)
        result.append(first)
        result.append(second)
    return result
def addMeasure(self, vector, length=4):
    # Takes in a vector and generates a measure based on the features of the vector
    """Generate one measure from the feature `vector` and append it.

    With probability vector.getRepetition() an earlier measure is reused
    (its vector's repetition replaced by the current one); otherwise
    notes are drawn until `length` beats are filled, each possibly
    expanded into a chord.
    """
    self.current_measure += 1
    if self.binaryProbability(vector.getRepetition()) == True:
        if self.current_measure > 1:
            measure_index = random.choice(range(len(self.measures)))
            the_measure = self.measures[measure_index]
            # If we're repeating, we want the user's opinion to reflect on the
            # randomly selected vector's properties, except for the repetition
            # property.  This should reflect upon the current vector's (called
            # 'vector') repetition property.  So we make a new vector that has
            # the randomly selected vector's properties except for repetition,
            # which is taken from the current vector that caused us to repeat.
            new_vec = the_measure.getVector()
            new_vec.setRepetition(vector.getRepetition())
            the_measure.setVector(new_vec)
            self.measures.append(the_measure)  # Add the first measure.
            return
    measure = Measure()
    measure.setVector(vector)
    time_left = float(length)
    self.chanceToChangeOctave(vector.getOctaveChance())
    while time_left > 0:
        pitch_list = self.buildNoteRatioList(vector.getGoodNoteRatio())
        # NOTE(review): `measure` is a Measure instance, so `measure == []`
        # presumably only holds if Measure defines list-style equality —
        # confirm; otherwise the "first note" branch can never run.
        if measure == []:
            # First note in the measure, just choose an item from our
            # possible pitches randomly. <- Could be improved
            pitch = random.choice(pitch_list)
        else:
            # otherwise, incorporate the variety distance weight.
            pitch_list.extend(
                self.buildDistanceRatio(measure[-1], vector.getNoteDistance()))
            pitch = random.choice(pitch_list)
        if time_left != 4:
            duration = self.pickDuration(vector.getVarietyDuration(),
                                         time_left, measure)
        else:
            duration = random.choice(
                [.25, .5, 1, 2, 4])  # First note, choose a duration randomly
        note = Note(4 - time_left, 1, pitch, duration, 100)
        measure.addNote(note)
        # Check to see if adding a chord
        if self.binaryProbability(vector.getOneVsChord()):
            # We rolled a chord: add every pitch of the chord at the same
            # onset and duration.
            pitches_in_chord = self.chordList(pitch)
            for chord_pitch in pitches_in_chord:
                chord_note = Note(4 - time_left, 1, chord_pitch, duration, 100)
                measure.addNote(chord_note)
        time_left -= duration
    self.measures.append(measure)
def eval_dataset(self, dataset): """ Evaluate the dataset with the given model. """ # Reset normalization parameter settings = ["raw", "fil"] normalizer = 0 # Contains the measure values for the given dataset (e.g. test for arity 2) current_rank = Measure() for i, fact in enumerate(dataset): arity = self.dataset.max_arity - (fact == 0).sum() for j in range(1, arity + 1): normalizer += 1 queries = self.create_queries(fact, j) for raw_or_fil in settings: r, e1, e2, e3, e4, e5, e6 = self.add_fact_and_shred( fact, queries, raw_or_fil) if (self.model_name == "HypE"): ms = np.zeros((len(r), 6)) bs = np.ones((len(r), 6)) ms[:, 0:arity] = 1 bs[:, 0:arity] = 0 ms = torch.tensor(ms).float().to(self.device) bs = torch.tensor(bs).float().to(self.device) sim_scores = self.model(r, e1, e2, e3, e4, e5, e6, ms, bs).cpu().data.numpy() elif (self.model_name == "MTransH"): ms = np.zeros((len(r), 6)) ms[:, 0:arity] = 1 ms = torch.tensor(ms).float().to(self.device) sim_scores = self.model(r, e1, e2, e3, e4, e5, e6, ms).cpu().data.numpy() else: sim_scores = self.model(r, e1, e2, e3, e4, e5, e6).cpu().data.numpy() # Get the rank and update the measures rank = self.get_rank(sim_scores) current_rank.update(rank, raw_or_fil) # self.measure.update(rank, raw_or_fil) if i % 1000 == 0: print("--- Testing sample {}".format(i)) return current_rank, normalizer
def FindDomain(self):
    """
    Finds the region that all variables are defined: for each variable,
    widens its stored range to the union (min lower bound, max upper
    bound) of that variable's ranges over all registered orthogonal
    systems, then rebuilds the default sampling measure on the result.
    """
    dom = [list(tpl) for tpl in self.Domain]
    for v_idx in range(self.num_vars):
        v = self.Vars[v_idx]
        for f in self.OrthSys:
            Orth = self.OrthSys[f]
            if v in Orth.Vars:
                idx = Orth.Vars.index(v)
                rng = Orth.Domain[idx]
                # `None` marks a bound not set yet; use identity comparison
                # (`is None`) per PEP 8 instead of `== None`.
                if (dom[v_idx][0] is None) or (rng[0] < dom[v_idx][0]):
                    dom[v_idx][0] = rng[0]
                if (dom[v_idx][1] is None) or (rng[1] > dom[v_idx][1]):
                    dom[v_idx][1] = rng[1]
    self.Domain = [tuple(lst) for lst in dom]
    # defines the default sampling measure object
    self.SampleMeasure = Measure(self.Domain, 1)
def Run(self):
    """Analyze every problem listed in the current problem file, one per
    line: build it, bound it, solve it, and write its measure."""
    print("Analyzing problems")
    with open(self.options.currentProblemFile, 'rt') as problemsFile:
        problemIndex = 0
        for line in problemsFile:
            problemIndex += 1
            print("\n=> Problem #" + str(problemIndex) + " will be analyzed")
            problem = self.LoadProblem(Builder(line, self.options))
            problem.SetMeasure(Measure(problem))
            problem.SetBounds(Bounds(problem))
            problem.bounds.CalculateBounds()
            solver = Solver(problem)
            solver.Solve()
            problem.measure.Write()
    print("Finished analyzer run")

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
def mutate(population, percentage):
    """Return a mutated copy of `population`.

    Each note independently mutates with probability `percentage` by
    redrawing its MIDI number from a Gaussian centred on the current
    value, using the measure's own midi-number stdev.  Note lengths are
    left alone so measures never need re-fixing.
    """
    import copy  # local import: only needed for the per-note copies

    result = []
    for gene in population:
        new_gene = []
        for measure in gene:
            new_notes = []
            for note in measure.notes:
                # Bug fix: copy the note before mutating.  `new_note = note`
                # aliased the original object, so mutation corrupted the
                # input population in place.
                new_note = copy.copy(note)
                if random.random() < percentage:
                    # for now we vary the note number since
                    # changing the note length would require the measure to be fixed
                    new_note.midi_num = math.floor(
                        random.gauss(
                            note.midi_num,
                            measurements.SingleMeasurements.midi_number_stdev(
                                measure)))
                new_notes.append(new_note)
            new_gene.append(Measure(new_notes))
        result.append(new_gene)
    return result
def __init__(self): self._max_window = 20 # 滑动窗口中的frame集合 self._frames_DB = [] # 滑动窗口中mappoint集合,里面元素为字典(描述子->Mappoints类) self._mappoints_DB = {} self._state = np.array([]) self._descriptor2state = {} self._frameid2state = {} self._jacobi = np.array([]) self._error = np.array([]) self._measure = Measure() self._prior_matrix = np.array([]) self._prior_matrixb = np.array([]) self._lastframe = Frame(0) self._coefficient = [[], []] self._measure_count = 0 # draw self._esti_pose = [[],[]] self._f2ftrack = [] self._f2ftrack_show = [[],[]] self._slideframes = [[], []] self._slidepoints = [[],[]]
def __init__(self, variables, var_range, env='sympy'):
    """
    To initiate an orthogonal system of functions, one should provide a
    list of symbolic variables ``variables`` and the range of each of
    these variables as a list of lists ``var_range``.
    """
    # isinstance is the idiomatic type check (replaces `type(x) is list`).
    assert isinstance(variables, list) and isinstance(var_range, list), \
        """The OrthSystem class object requires two lists as inputs: (1) list of symbolic variables; (2) range of each variable."""
    self.EnvAvail = self.DetSymEnv()
    if self.EnvAvail == []:
        raise Exception("No Symbolic tool is available.")
    elif (env in self.EnvAvail):
        self.Env = env
    else:
        raise Exception("The selected symbolic tool is not supported.")
    self.Vars = variables
    self.num_vars = len(self.Vars)
    self.Domain = var_range
    # Default sampling measure over the full domain.
    self.measure = Measure(self.Domain, 1)
    self.OriginalBasis = []
    self.OrthBase = []
    self.Numerical = False
    self.CommonSymFuncs(self.Env)
def test(self):
    """Sanity-check the ranking metrics against uniformly random scores."""
    torch.manual_seed(0)
    settings = ['raw']
    query_size = 1000
    vocab_size = 1000
    for raw_or_fil in settings:
        for head_or_tail in ["head"]:
            # Re-seed so every setting sees identical random scores.
            torch.manual_seed(0)
            self.measure = Measure()
            random_scores = torch.rand((query_size, vocab_size))
            print(f"current settings {head_or_tail} + {raw_or_fil}")
            for fact in random_scores:
                self.measure.update(self.getRank(fact), raw_or_fil)
            self.measure.normalize(query_size)
            self.measure.print_()
# -*- coding: utf-8 -*- # コース1のスクリプト import time from whill import ComWHILL from command_forward import CommandForward from command_turn_left import CommandTurnLeft from command_turn_right import CommandTurnRight from command_stop import CommandStop from command_http import CommandHttp from measure import Measure whill = ComWHILL(port='/dev/tty.usbserial-FT2K21HW') request_speed_mode = 0 measure = Measure() commands = [ CommandForward(whill, measure, 1.2), CommandTurnRight(whill, measure, 45.0), CommandHttp("http://192.168.21.214:3000/change1"), CommandStop(whill, measure, 5000), CommandTurnLeft(whill, measure, 90.0), CommandForward(whill, measure, 1.0), CommandTurnRight(whill, measure, 45.0), CommandHttp("http://192.168.21.214:3000/change2"), CommandStop(whill, measure, 5000), CommandTurnLeft(whill, measure, 135.0), CommandForward(whill, measure, 1.0), CommandTurnRight(whill, measure, 45.0), CommandHttp("http://192.168.21.214:3000/change3"),
#!/usr/bin/python
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
from time import time
from measure import Measure

# Ultrasonic sensor pins on the Arduino.
#trigpin=4
#echopin=3
trigpin = 2
echopin = 3

try:
    connection = SerialManager()
    a = ArduinoApi(connection=connection)
    a.pinMode(trigpin, a.OUTPUT)
    a.pinMode(echopin, a.INPUT)
    m = Measure(connection=connection)
    print("Connected to Arduino")
except Exception:
    # Bug fix: the original bare `except:` swallowed the failure and then
    # fell through to the loop, crashing on the undefined `m`.  Report and
    # stop instead.
    print("Failed to connect")
    raise SystemExit(1)

# Poll the sensor forever, printing valid (positive) distances.
while True:
    distance = m.getMeasure(trigpin, echopin)
    if distance > 0:
        print(distance)
    sleep(.25)
estimate_init_pose = np.array([[12.0], [3.0], [0.0]])
# NOTE(review): `init_pose` is referenced here but defined elsewhere in the
# file (outside this fragment) — confirm it exists before this point.
move_model = MoveModel(init_pose)
landmarks = Landmark()
slidewindow_graph = Slidewindow_graph()
draw = Draw(landmarks, slidewindow_graph, move_model)
# Sensor detection radius.
r = 3.0
# Loop counter and total iteration count (renamed from `sum`, which
# shadowed the builtin).
n = 0
total_steps = 100
# Main loop (*.*) #########################################################
while n != total_steps:
    measure = Measure(move_model, landmarks, r)
    measure.GetMeasure(n)
    if n == 0:
        # The whole framework exists to maintain this slidewindow_graph structure.
        slidewindow_graph.Initialize(estimate_init_pose, measure)
    else:
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # supported replacement for interval timing.
        t1 = time.perf_counter()
        slidewindow_graph.Update(measure)
        t2 = time.perf_counter()
        #print(t2-t1)
    draw.Show_result(r)
    move_model.Updatepose()
    n = n + 1
##############################################################
client.loop_start() # subscribing to pool receiving endpoint logger.info("Subscribing to " + f"getPool{args.DeviceID}") client.subscribe(f"getPool{args.DeviceID}") # register receive routine client.on_message = on_message # db to log generated data db = TinyDB(f"log/device{args.DeviceID}.json") # mainloop while True: # generate measure measure = Measure(args.DeviceID) # pack and pad payload payload = pad16(measure.pack()) logger.info(f"produced {measure}") # log generated data for testing db.insert({"entry": str(measure)}) logger.debug(f"payload: {payload.hex()}") # encrypt payload using SF encrypted = cipher.encrypt(payload) # publish data to AG client.publish("addToPool", encrypted.hex()) logger.debug("publishing " + encrypted.hex() + " to topic /addToPool")
def deMeasureTrack(self):
    """Get rid of measures and return a simpler representation.

    Print Track such that robots can make music: returns one combined
    string per octave row, each the concatenation of that octave's
    content across every measure of the track.
    """
    # Get set of all octaves used
    octaves = []
    octaveCounts = {}
    for measure in self.measures:
        # Count, per measure, how many rows each octave number has...
        tempOctaveCounts = {}
        for m in measure.getOctaveList():
            if m in tempOctaveCounts:
                tempOctaveCounts[m] = tempOctaveCounts[m] + 1
            else:
                tempOctaveCounts[m] = 1
        # ...and keep the maximum per-measure count seen for each octave.
        for k in tempOctaveCounts:
            if k in octaveCounts:
                if tempOctaveCounts[k] > octaveCounts[k]:
                    octaveCounts[k] = tempOctaveCounts[k]
                else:
                    pass
            else:
                octaveCounts[k] = tempOctaveCounts[k]
    # Get string representing all the octaves we need
    octaves = [str(key) * octaveCounts[key] for key in octaveCounts]
    octaves = ''.join(octaves)
    octaves = [int(octaves[i]) for i in range(0, len(octaves))]
    octaves.sort()
    # print(octaves) gives something like [2, 3, 3, 4, 4, 5, 5, 6]
    # We have 1 measure of pitch 2, 2 measures of pitch 3, 2 measures
    # of pitch 4, etc.
    """ Combine all measures Make sure each measure contains all octaves
    before combining Write a blank (e.g. "5|---------...--|" octave for
    unused octaves in each measure. """
    track = []
    measureToWrite = Measure()  # Empty measure to write stuff to
    for measure in self.measures:
        measureCopy = copy.deepcopy(
            measure)  # can use mutable methods on this copy
        for o in octaves:
            if o in measureCopy.getOctaveList():
                measureToWrite.addOctave(str(measureCopy.popOctaveN(o)))
            else:
                # If octave o is not in the measure's octave list, then
                # write a blank octave by repeating "-"s.
                blankLen = 26  # Don't hard code 26 here; get it from somewhere
                measureToWrite.addOctave("{0}|{1}|".format(
                    o, "-" * blankLen))
        track.append(measureToWrite)
        measureToWrite = Measure()  # Clear this
    """ When all measures contain all octaves, we can combine those
    measures by simply concatenating strings. """
    combined = [["{0}|".format(i)] for i in octaves]
    for i in range(0, len(track)):
        measure = track[i]
        for j in range(0, len(measure.octaves)):
            octave = measure.octaves[j]
            # NOTE(review): octave.notes is None! — joining below will then
            # raise TypeError; confirm Octave populates .notes.
            combined[j].append(octave.notes)  # octave.notes is None!
    for i in range(0, len(combined)):
        combined[i].append("|")  # ending char
        combined[i] = ''.join(combined[i])
    return combined