def main():
    myBroker = ALBroker("NaoAppBroker",
                        "0.0.0.0",   # listen to anyone
                        0,           # find a free port and use it
                        NAO_IP,      # parent broker IP
                        NAO_PORT)    # parent broker port
    global SpeechDetection, NaoWorkingMode
    SpeechDetection = SpeechDetectionModule("SpeechDetection")

    basicVideoProcessing = BasicVideoProcessing()
    basicVideoProcessing.connectToCamera()

    try:
        while True:
            img = basicVideoProcessing.getImageFromCamera()
            if img is None:
                print "Image from camera is empty!"
                break
            else:
                if NaoWorkingMode is None:
                    sample.sample()
                elif NaoWorkingMode == "color":
                    print 'Working in color detection mode...'
                    # cv2.imshow('Color', img) - does not work here, probably
                    # because this code is loaded as a module
                elif NaoWorkingMode == "text":
                    print 'Working in text detection mode...'
                elif NaoWorkingMode == "hand":
                    print 'Working in hand gesture detection mode...'
                elif NaoWorkingMode == "phone":
                    print 'Working in mobile phone detection mode...'
    except KeyboardInterrupt:
        print "Interrupted by user, shutting down"
    except RuntimeError as err:
        print "An error occurred: " + str(err)
def evaluateCrossSections():
    for tmpline in csFile:
        line = tmpline.split()
        if line == []:
            continue
        if line[0] == 'STOP':
            break
        if not (line[0] == 'charm' or line[0] == 'beauty'):
            continue
        Flavour = line[0]
        Q2Min = line[1]

        x = sample(Flavour, Q2Min, "AE", 0, '')
        x._verbose = False
        x._NumberOfEventsCrossSect = 10000
        print Flavour + ', Q^2> ' + Q2Min + ' GeV^2, resAE'
        x._calculateCrossSect()

        y = sample(Flavour, Q2Min, "C", 0, '')
        y._verbose = False
        y._NumberOfEventsCrossSect = 10000
        print Flavour + ', Q^2> ' + Q2Min + ' GeV^2, resC'
        y._calculateCrossSect()

        z = sample(Flavour, Q2Min, "BGF", 0, '')
        z._verbose = True
        z._NumberOfEventsCrossSect = 10000
        print Flavour + ', Q^2> ' + Q2Min + ' GeV^2, BGF'
        z._calculateCrossSect()
async def on_message(message):
    global training
    if client.user.mentioned_in(message):
        sp.sample(sampleArgs, re.sub('(<@|<@!)([0-9])+>', '', message.content))
        tf.reset_default_graph()
        with open('output/output.txt', 'r') as the_file:
            # The training data produced a lot of double-escaped unicode
            # (e.g. \\xf012 or something like that), so it has to be decoded
            # twice; python is funky, so this is the ugly, horrible fix.
            lines = the_file.read().split('\\r\\n')
        # Remove the re.sub() to allow barry to tag people.
        await client.send_message(
            discord.Object(id=client_channel),
            re.sub('(<@|<@!)([0-9])+>', '',
                   lines[1].encode('ascii').decode('unicode_escape')
                           .encode('ascii').decode('unicode_escape')),
            tts=bool(random.getrandbits(1)))
    elif message.content.startswith('!record') and message.author.id == admin_id:
        print('Recording...')
        with open('data/input.txt', 'w') as the_file:
            async for log in client.logs_from(message.channel, limit=1000000000000000):
                messageEncode = str(log.content.encode("utf-8"))[2:-1]
                template = '{message}\n'
                try:
                    the_file.write(template.format(message=messageEncode))
                except Exception:
                    pass  # skip messages that cannot be written
        print('Data Collected from ' + message.channel.name)
    elif message.content.startswith('!train') and message.author.id == admin_id:
        if not training:
            # Status change doesn't work right now: starting the training
            # turns off the discord bot, probably just due to how the
            # training function works.
            await client.change_presence(game=None, status='with his brain', afk=False)
            training = True
            tr.train(trainArgs)
        else:
            await client.change_presence(game=None, status=None, afk=False)
            training = False
    elif message.content.startswith('!leave') and message.author.id == admin_id:
        await client.disconnect()
def sample(checkpoint_path, sampling_type, n_samples):
    import sample  # local import; shadows this function's own name in the body
    models_path = os.path.dirname(checkpoint_path)
    models_path = os.path.join(models_path, 'models.pkl')
    with open(models_path, 'rb') as f:
        models = pkl.load(f)
    for model in models:
        sample.sample(checkpoint_path, model, sampling_type, n_samples)
def play(self):
    abcpath = os.getcwd() + '\\music\\' + str(self.secnum) + '.abc'
    midipath = os.getcwd() + '\\music\\' + str(self.secnum) + '.mid'
    if not os.path.exists(midipath):
        sample(self.secnum)
        music21.converter.parse(abcpath).write('midi', midipath)
    pygame.mixer.music.load(midipath)
    pygame.mixer.music.play(-1)
def sentence_gen():
    histo_text = get_words('siddhartha.txt')
    histo = histogram(histo_text)
    random_words = []
    for i in range(7):
        random_words.append(sample(histo))
    random_sentence = sentence_maker(random_words)
    return random_sentence
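# Several of the Markov-chain snippets here (sentence_gen, chain_traversal,
# generate_sentence) assume a weighted `sample(histogram)` helper. This is a
# minimal sketch of one, assuming the histogram is a dict of word -> count;
# the real helper these projects import may differ.
import random

def sample(histogram):
    """Draw a key with probability proportional to its count."""
    threshold = random.uniform(0, sum(histogram.values()))
    cumulative = 0
    for word, count in histogram.items():
        cumulative += count
        if cumulative >= threshold:
            return word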
def chain_traversal(self, length=10):
    '''Creates a sentence using the Markov chain.'''
    current_word = random.choice(list(self))
    sentence = [current_word]
    for _ in range(length):
        new_word = sample(self[current_word])
        sentence.append(new_word)
        current_word = new_word
    return ' '.join(sentence)
def fig1(model, output_folder):
    '''
    This function makes two 2x10 images showing the difference between
    conditioning and intervening
    '''
    str_step = guess_model_step(model)
    fname = os.path.join(output_folder, str_step + model.model_type)
    for key in ['Young', 'Smiling', 'Wearing_Lipstick', 'Male',
                'Mouth_Slightly_Open', 'Narrow_Eyes']:
    # for key in ['Mustache', 'Bald']:
    # for key in ['Mustache']:
    # for key in ['Bald']:
        print 'Starting ', key,
        p50, n50 = find_logit_percentile(model, key, 50)
        do_dict = {key: np.repeat([p50], 10)}
        eps = 3
        cond_dict = {key: np.repeat([+eps], 10)}

        out, _ = sample(model, do_dict=do_dict)
        intv_images = out['G']
        out, _ = sample(model, cond_dict=cond_dict)
        cond_images = out['G']
        images = np.vstack([intv_images, cond_images])
        dc_file = fname + '_' + key + '_topdo1_botcond1.pdf'
        save_figure_images(model.model_type, images, dc_file, size=[2, 10])

        do_dict = {key: np.repeat([p50, n50], 10)}
        cond_dict = {key: np.repeat([+eps, -eps], 10)}
        dout, _ = sample(model, do_dict=do_dict)
        cout, _ = sample(model, cond_dict=cond_dict)
        itv_file = fname + '_' + key + '_topdo1_botdo0.pdf'
        cond_file = fname + '_' + key + '_topcond1_botcond0.pdf'
        save_figure_images(model.model_type, dout['G'], itv_file, size=[2, 10])
        save_figure_images(model.model_type, cout['G'], cond_file, size=[2, 10])
        print '..finished ', key
    # return images, cout['G'], dout['G']
    return key
def mplot3d(f, var1, var2, show=True):
    """
    Plot a 3d function using matplotlib/Tk.
    """
    import warnings
    warnings.filterwarnings("ignore", "Could not match \S")

    try:
        import pylab as p
        import matplotlib.axes3d as p3
    except ImportError:
        raise ImportError("Matplotlib is required to use mplot3d.")

    x, y, z = sample(f, var1, var2)

    fig = p.figure()
    ax = p3.Axes3D(fig)

    # ax.plot_surface(x, y, z)  # seems to be a bug in matplotlib
    ax.plot_wireframe(x, y, z)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    if show:
        p.show()
def create_name(start_token, dictionary):
    """Takes a dictionary and a start token and builds a name."""
    # create the name and add the first two letters
    name = []
    # this is hard coded; must be changed to fit the order number (currently second order)
    (letter1, letter2) = start_token
    name.append(letter1)
    name.append(letter2)
    current_token = start_token
    # keep going until the current token ends in whitespace or the name is long enough
    while not current_token[1].isspace() and len(name) <= 10:
        for key, value in dictionary.items():
            if key == current_token:
                # sample from the histogram of values
                cumulative = sample.cumulative_distribution(value)
                sample_letter = sample.sample(cumulative)
                # add the new sample to the name list
                name.append(sample_letter)
                # shift the token window: the second letter of the old token
                # plus the sampled letter become the new token
                # (hard coded for second order)
                (current_token_one, current_token_two) = current_token
                current_token = (current_token_two, sample_letter)
                # get out of the for loop and start the process over
                break
    return name
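# create_name (and create_sentence further down) lean on a `sample` module
# with `cumulative_distribution` and `sample` helpers. A hedged sketch of
# that pair, assuming the histogram is a dict of item -> count and the
# distribution is a list of (cumulative probability, item) pairs; the actual
# module may use different shapes.
import random

def cumulative_distribution(histogram):
    total = float(sum(histogram.values()))
    pairs, running = [], 0.0
    for item, count in histogram.items():
        running += count / total
        pairs.append((running, item))
    return pairs

def sample(cumulative_pairs):
    dart = random.random()
    for cumulative_probability, item in cumulative_pairs:
        if dart <= cumulative_probability:
            return item
    return cumulative_pairs[-1][1]  # guard against floating-point round-off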
def hello_world():
    his = histogram_dict(original_text)
    p = s.get_probability(his)
    s_ = s.sample(p, his)
    # print("W: %s" % s_)
    return "W: %s" % s_
def encoder(message_str, context, enc, model):
    unicode_enc = True
    mode = 'arithmetic'
    block_size = 3       # for huffman and bins
    temp = 0.9           # for arithmetic
    precision = 26       # for arithmetic
    sample_tokens = 100  # for sample
    topk = 300
    finish_sent = True

    context_tokens = encode_context(context, enc)

    if mode not in ['arithmetic', 'huffman', 'bins', 'sample']:
        raise NotImplementedError

    if unicode_enc:
        ba = bitarray.bitarray()
        ba.frombytes(message_str.encode('utf-8'))
        message = ba.tolist()
    else:
        message_ctx = [enc.encoder['<|endoftext|>']]
        message_str += '<eos>'
        message = decode_arithmetic(model, enc, message_str, message_ctx,
                                    precision=40, topk=60000)

    # Next encode bits into cover text, using arbitrary context
    Hq = 0
    if mode == 'arithmetic':
        out, nll, kl, words_per_bit, Hq = encode_arithmetic(
            model, enc, message, context_tokens, temp=temp,
            finish_sent=finish_sent, precision=precision, topk=topk)
    elif mode == 'huffman':
        out, nll, kl, words_per_bit = encode_huffman(
            model, enc, message, context_tokens, block_size,
            finish_sent=finish_sent)
    elif mode == 'bins':
        out, nll, kl, words_per_bit = encode_block(
            model, enc, message, context_tokens, block_size,
            bin2words, words2bin, finish_sent=finish_sent)
    elif mode == 'sample':
        out, nll, kl, Hq = sample(model, enc, sample_tokens, context_tokens,
                                  temperature=temp, topk=topk)
        words_per_bit = 1
    text = enc.decode(out)
    return text
def mplot2d(f, var, show=True):
    """
    Plot a 2d function using matplotlib/Tk.
    """
    import warnings
    warnings.filterwarnings("ignore", "Could not match \S")

    p = import_module("pylab")
    if not p:
        sys.exit("Matplotlib is required to use mplot2d.")

    if not is_sequence(f):
        f = [f, ]

    for f_i in f:
        x, y = sample(f_i, var)
        p.plot(x, y)

    p.draw()
    if show:
        p.show()
def mplot3d(f, var1, var2, show=True):
    """
    Plot a 3d function using matplotlib/Tk.
    """
    import warnings
    warnings.filterwarnings("ignore", "Could not match \S")

    p = import_module('pylab')
    # Try newer version first
    p3 = (import_module('mpl_toolkits.mplot3d',
                        __import__kwargs={'fromlist': ['something']})
          or import_module('matplotlib.axes3d'))
    if not p or not p3:
        sys.exit("Matplotlib is required to use mplot3d.")

    x, y, z = sample(f, var1, var2)

    fig = p.figure()
    ax = p3.Axes3D(fig)

    # ax.plot_surface(x, y, z, rstride=2, cstride=2)
    ax.plot_wireframe(x, y, z)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    if show:
        p.show()
def minning(request):
    # NB: the result must not be bound to the name `sample`; doing so makes
    # `sample` a local variable throughout the function, so the module
    # reference on the right-hand side raises UnboundLocalError.
    smp = sample.sample('F:/2016Graduate/20160922.xlsx', u'Python')
    sup = 118
    fsetdict = {}
    # Unweighted variant, kept for reference:
    # fItems = mining.findFItem(smp.recipeTrans, sup)
    # f2set = mining.findFSet(smp.recipeTrans, fItems.keys(), 2, sup)
    # fsetdict.update(f2set)
    # f3set = mining.findFSet(smp.recipeTrans, mining.getItemInSet(f2set.keys()), 3, sup)
    # fsetdict.update(f3set)
    # f4set = mining.findFSet(smp.recipeTrans, mining.getItemInSet(f3set.keys()), 4, sup)
    # fsetdict.update(f4set)
    # r = creatrules.rulesbuilder(sample=smp, fset=fsetdict, itemcount=fItems, conf=0.7)
    # r.export2excel('F:/2016Graduate/python_result02.xls')
    # dr = drawrule.drawrule(r.rulesdict)
    # dr.drawscatter(fn='scatter02.png')
    # dr.drawbubble(fn='bubble02.png')
    # dr.drawnetwork(fn='network02.png')
    fItems = mining_adv.findFItem(smp.recipeTrans, smp.weights, sup)
    f2set = mining_adv.findFSet(smp.recipeTrans, fItems.keys(), smp.weights, 2, sup)
    fsetdict.update(f2set)
    f3set = mining_adv.findFSet(smp.recipeTrans, mining.getItemInSet(f2set.keys()), smp.weights, 3, sup)
    fsetdict.update(f3set)
    f4set = mining_adv.findFSet(smp.recipeTrans, mining.getItemInSet(f3set.keys()), smp.weights, 4, sup)
    fsetdict.update(f4set)
    r = creatrules.rulesbuilder(sample=smp, fset=fsetdict, itemcount=fItems, conf=0.7)
    r.export2excel('F:/2016Graduate/python_result_y0.xls')
def create_sentence(self, start_token, stop_tokens, dictionary):
    """Takes a dictionary plus start and stop tokens and builds a sentence."""
    # create the sentence and add the first three words
    sentence = []
    # this is hard coded; must be changed to fit the order number (currently third order)
    (word1, word2, word3) = start_token
    sentence.append(word1)
    sentence.append(word2)
    sentence.append(word3)
    # print("There should be three words", sentence)
    current_token = start_token
    # print("This is my dictionary", dictionary)
    # keep going until current_token is a stop token and the sentence has more than 8 words
    while current_token not in stop_tokens or len(sentence) <= 8:
        for key, value in dictionary.items():
            if key == current_token:
                # sample from the histogram of values
                cumulative = sample.cumulative_distribution(value)
                sample_word = sample.sample(cumulative)
                # add the new sample to the sentence list
                sentence.append(sample_word)
                # shift the token window: the last two words of the old token
                # plus the sampled word become the new token
                # (hard coded for third order)
                (current_token_one, current_token_two, current_token_three) = current_token
                current_token = (current_token_two, current_token_three, sample_word)
                # get out of the for loop and start the process over
                break
    return sentence
def testSample():
    model = WrappedModel(TestDistributionSystem().getModel())
    print(model.getDistribution(DistrCall('decideBias', [])))
    for i in range(20):
        res = sample(model, DistrCall('main', []))
        print(res)
        print(list(map(model.refToJSON, res)))
def newsample(self):
    try:
        dlg = sampleDialog()
        if dlg.exec_() == QDialog.Accepted:
            name, pt = dlg.getValues()
            if pt is None or np.equal(pt[:-1], None).any():
                raise TypeError
            self.fig.clear()
            if self.sample:
                self.sample.disconnect_plot()
            self.sample = sample(name=name, bl=pt[0], br=pt[1], tl=pt[2],
                                 tr=pt[3], c=pt[4], temp=self.temp)
            self.sample.show(self.fig)
    except TypeError:
        logger.exception("Can't create sample: at least one point is invalid")
        QMessageBox.critical(self, "Error",
                             "Can't create sample: at least one point is invalid")
    except Exception:
        logger.exception("Can't create sample")
        QMessageBox.critical(self, "Error", "Can't create sample")
def update_valid(n, p):
    n = unwrap(n)
    # this grid square must have been created already in locally_early()
    q, t, r = grid[n]
    # no change if this square is already output or covered
    if isinf(t):
        return False
    p = closest_p(p, n)
    r = r - disk(p)
    r.simplify()
    # update
    grid[n] = (q, t, r)
    # Did p invalidate q?
    if too_close(q, p):
        A = r.area()
        if A == 0:
            # q's square has been covered
            grid[n] = (q, inf, False)
        else:
            # generate a new q
            q = sample.sample(r)
            t = t + random.expovariate(A)
            grid[n] = (q, t, r)
        return True
    return False
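# update_valid assumes a `too_close` predicate from the surrounding
# Poisson-disk sampler. A minimal sketch, under the assumptions that points
# are (x, y) pairs, distances do not wrap, and `radius` is a global holding
# the minimum separation distance; the real predicate may account for the
# periodic domain implied by unwrap().
def too_close(q, p):
    """True when q and p are closer than the Poisson-disk radius."""
    return (q[0] - p[0]) ** 2 + (q[1] - p[1]) ** 2 < radius ** 2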
def mplot3d(f, var1, var2, show=True):
    """
    Plot a 3d function using matplotlib/Tk.
    """
    import warnings
    warnings.filterwarnings("ignore", "Could not match \S")

    p = import_module('pylab')
    # Try newer version first
    p3 = (import_module('mpl_toolkits.mplot3d',
                        __import__kwargs={'fromlist': ['something']})
          or import_module('matplotlib.axes3d'))
    if not p or not p3:
        sys.exit("Matplotlib is required to use mplot3d.")

    x, y, z = sample(f, var1, var2)

    fig = p.figure()
    ax = p3.Axes3D(fig)

    # ax.plot_surface(x, y, z)  # seems to be a bug in matplotlib
    ax.plot_wireframe(x, y, z)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    if show:
        p.show()
def get_sample_freq(self, selected_freq, SPS=40000, dispFFT=False,
                    FFTchannels=[1, 2, 3], axis=None, raw_file=""):
    samples_count = 4096
    bytes_in_block = samples_count * 16  # 4 channels, 4 B per sample
    # rFFT bin frequencies for this block length and sample rate
    fftfreq = np.fft.rfftfreq(samples_count, d=1.0 / SPS)
    selected_index = np.argmin(np.abs(fftfreq - selected_freq))

    # self._tail = struct.unpack_from("l", self._data, self.PRU0_OFFSET_DRAM_HEAD)[0]
    samples = self.get_sample_block(bytes_in_block)
    # invert dimensions: one row per channel
    channels = np.transpose(samples)

    ref_wave = waveform(selected_freq,
                        np.fft.rfft(channels[0])[selected_index] / samples_count)
    selected_sample = sample(ref_wave)
    for chan in FFTchannels:
        fft = np.fft.rfft(channels[chan]) / samples_count
        chan_wave = waveform(selected_freq, fft[selected_index])
        selected_sample.add_channel(chan_wave)
    return selected_sample
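# get_sample_freq builds `waveform` and `sample` objects from FFT bins. A
# sketch of the minimal containers it needs; the field names are assumptions,
# not taken from the original project.
class waveform(object):
    def __init__(self, frequency, amplitude):
        self.frequency = frequency
        self.amplitude = amplitude  # complex rFFT bin value

class sample(object):
    def __init__(self, ref_wave):
        self.ref_wave = ref_wave  # reference channel
        self.channels = []

    def add_channel(self, chan_wave):
        self.channels.append(chan_wave)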
def __init__(self, maxNumber=1):
    self.n = 0
    self.mu = 0
    self.m2 = 0
    self.lo = 10**32
    self.hi = -10**32
    self.sd = 0
    self.same = sample(maxNumber)
def nystrom(net, all_idx, k, d):
    k_set = sample(net, k, 'deg^2_prob')
    mat = net.calc_matrix_sparse(k_set, k_set).toarray()
    u, dd, v = np.linalg.svd(mat)
    reconstruct_mat = u[:, :d] @ np.diag(mydiv(dd[:d])) @ v[:d, :]
    left = net.calc_matrix_sparse(all_idx, k_set)
    right = net.calc_matrix_sparse(k_set, all_idx)
    return left @ reconstruct_mat @ right
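# nystrom assumes a `mydiv` helper for inverting the retained singular
# values. A hedged sketch, assuming it simply guards the reciprocal against
# division by (near-)zero; the real helper may differ.
import numpy as np

def mydiv(d, eps=1e-12):
    """Reciprocal of singular values, zeroing out near-zero entries."""
    return np.where(d > eps, 1.0 / np.maximum(d, eps), 0.0)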
def chain_traversal(self, length=20):
    '''Creates a sentence using the Markov chain.'''
    current_word = random.choice(list(self.keys()))
    sentence = []
    for _ in range(length):
        new_word = sample(self[current_word])
        sentence.append(current_word[1])
        current_word = (current_word[1], new_word)
    return ' '.join(sentence)
def __init__(self):
    self.cli = sample.sample()
    self.df = pd.read_csv('soybeans-texas-collin.csv')
    self.df.columns = [c.lower().replace(' ', '_') for c in self.df.columns]
    self.df['value'] = self.df['value'].str.replace(",", "")
    self.df['value'] = self.df['value'].apply(pd.to_numeric, errors='coerce')
def create_sample(dataset, model_name, prime, size=5000):
    output_name = "%s_%s_%d.txt" % (model_name, prime, size)
    output_file = os.path.join(dataset_dir, dataset, output_dir, output_name)
    model_file = os.path.join(dataset_dir, dataset, models_dir, model_name)
    sample_args = SampleArguments(model_file, size, prime)
    with open(output_file, "w") as out:
        tf.reset_default_graph()
        out.write(sample.sample(sample_args))
    return output_file
def generate():
    parser = argparse.ArgumentParser(description='Sample some text from the')
    parser.add_argument('epoch', type=int, help='epoch checkpoint to sample')
    parser.add_argument('--seed', default='', help='initial seed for the text')
    parser.add_argument('--len', type=int, default=512, help='no of character')
    args = parser.parse_args()
    print(args.epoch)
    music = sample(args.epoch, args.seed, args.len)
    # return sample(args.epoch, args.seed, args.len)
    return render_template('generate.html', data=music)
def __init__(self, maxNumber, listNumbers):
    self.n = 0
    self.mu = 0
    self.m2 = 0
    self.lo = 10**32   # was 10 ^ 32, which is XOR, not exponentiation
    self.hi = -10**32
    self.sd = 0
    self.same = sample(maxNumber)
    for x in listNumbers:
        self.numInc(x)
def train(self, round):
    # Simulation Input Sampling
    self.thetas = sample.sample(
        self.args, self.args.simulation_budget_per_round,
        self.netDiscriminator, self.netPosterior, self.observation,
        self.prior, self.sim, round == 0,
        self.args.posteriorInferenceMethod == 'no', self.args.numChains,
        SNLE=False).detach().to(self.args.device)

    # Simulation Execution
    simulated_output = self.sim.parallel_simulator(self.thetas)
    print("simulated output : ", simulated_output.shape, self.thetas.shape)

    # Likelihood Learning
    self.training_theta, self.training_x, self.validation_theta, \
        self.validation_x, self.netDiscriminator = \
        RatioLearning.LikelihoodToEvidenceRatioLearning(
            args, round, self.thetas, simulated_output,
            self.training_theta, self.training_x,
            self.validation_theta, self.validation_x,
            self.netDiscriminator, self.optDiscriminator)

    # Get Training Teacher Data for Implicit Surrogate Proposal (ISP) Learning
    if self.args.posteriorInferenceMethod != 'no':
        self.teacher_theta = sample.sample(
            self.args, self.args.num_training, self.netDiscriminator,
            self.netPosterior, self.observation, self.prior, self.sim,
            round == -1, True, self.args.num_training,
            parallel=True, SNLE=False)

        # Implicit Surrogate Proposal (ISP) Distribution Learning
        self.netPosterior = PosteriorLearning.PosteriorLearning(
            args, self.sim, self.teacher_theta)
def generate_sentence(markov_dict):
    length = 10
    first_word = start_word(markov_dict)
    sentence = first_word.capitalize()
    for _ in range(random.randint(1, length)):
        second_word = sample(markov_dict[first_word])
        first_word = second_word
        sentence += ' ' + second_word
    return sentence
def main(config):
    print('creating data')
    dataLoader = Dataclass(dotdict(config.data_details))
    training_data, valid_data = dataLoader.get_training_data()

    # create hmm model
    # model = hmm.GaussianHMM(n_components=12, covariance_type="full")
    # model.fit(training_data)
    # feature_matrix, state_sequence = model.sample(100)

    model = None
    # The embedding dimension
    if not config.sampling_mode:
        print('starting training')
        model = train(training_data, valid_data, config)
    if model is None:
        model = load_model(config, training_data)
    print('sampling, and generating midi')
    sample(model, config, training_data)
def create_npz_files(n, name):
    total_frames = np.zeros((TOTAL_FRAMES, n, FRAMES, SIDE, SIDE))
    total_labels = np.zeros((TOTAL_FRAMES, n))
    total_sizes = np.zeros((TOTAL_FRAMES, n))
    for i in range(TOTAL_FRAMES):
        photons = i + 1
        index = photons_to_index(photons)
        total_frames[i], total_labels[i] = sample(photons, n, index)
        total_sizes[i] = [photons] * n
    total_frames = total_frames.reshape((TOTAL_FRAMES * n, FRAMES, SIDE, SIDE), order='F')
    total_labels = total_labels.reshape((TOTAL_FRAMES * n), order='F')
    total_sizes = total_sizes.reshape((TOTAL_FRAMES * n), order='F')
    np.savez(name, frames=total_frames, labels=total_labels, sizes=total_sizes)
def __init__(self, max=512, nums=[], func=lambda x: x):
    self.max = max
    self.n = 0
    self.mu = 0
    self.m2 = 0
    self.sd = 0
    self.lo = inf
    self.hi = -inf
    self.w = 1
    self._some = sample(self.max)
    for x in nums:
        self.numInc(func(x))
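# The Num-style __init__ snippets above keep `sample(maxNumber)` in
# self.same / self._some, which reads like a fixed-size reservoir sampler.
# A sketch under that assumption; the real class and its method names may
# well differ.
import random

class sample(object):
    def __init__(self, max=512):
        self.max = max
        self.n = 0
        self.kept = []

    def sampleInc(self, x):
        """Keep a uniform random subset of at most `max` items seen so far."""
        self.n += 1
        if len(self.kept) < self.max:
            self.kept.append(x)
        elif random.random() < float(self.max) / self.n:
            self.kept[random.randrange(self.max)] = x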
def prompt():
    prompt = request.args.get('text')
    challenger = request.args.get('challenger')
    n = int(request.args.get('n'))
    args = Namespace(prime=prompt, n=n, save_dir="save/save", sample=1)
    output = sample(args)
    new_example = output[0]
    for character in output[1:]:
        # Start a new line when the character is uppercase.
        if character.isupper():
            new_example += '\n'
        new_example += character
    return json.dumps({"text": new_example})
def gen_text():
    prompt = request.args.get('text')
    challenger = request.args.get('challenger')
    n = int(request.args.get('n'))
    args = Namespace(prime=prompt, n=n, save_dir="save/save", sample=1)
    output = sample(args)
    # output = clean_msg(output)
    output = output.replace("\n", "<br>")
    print output
    # output = replace_w_rhymes(output)
    print output
    # output = uniform_syl(output)
    return output
def fetch_grid((i, j)):
    """On-demand generation of empty grid squares."""
    (i, j) = unwrap((i, j))
    if (i, j) not in grid:
        xloc = i * grid_spacing
        yloc = j * grid_spacing
        r = Polygon([
            (xloc, yloc),
            (xloc + grid_spacing, yloc),
            (xloc + grid_spacing, yloc + grid_spacing),
            (xloc, yloc + grid_spacing),
        ])
        p = sample.sample(r)
        t = random.expovariate(r.area())
        grid[i, j] = (p, t, r)
    return grid[(i, j)]
def processConfigFile(confFileSuffix):
    configFile = open('config' + confFileSuffix + '.cfg', 'r')
    for tmpline in configFile:
        line = tmpline.split()
        if line == []:
            continue
        if line[0] == 'STOP':
            break
        if not (line[0] == 'charm' or line[0] == 'beauty'):
            # to enable comments in the config file
            continue
        Flavour = line[0]
        SubProcess = line[1]
        Q2Min = line[2]
        Luminosity = line[3]
        Trigger = line[4]
        print Flavour + ', subprocess: ' + SubProcess + ', Q^2> ' + Q2Min + ' GeV^2'
        x = sample(Flavour, Q2Min, SubProcess, float(Luminosity), Trigger)
        x._NumberOfEventsCrossSect = 1
        x._LetterToAppend = 'submit.letter' + confFileSuffix
        x._OutputPathPrefix = getPathPrefix(confFileSuffix)
        x._generate()
def gibbs_iteration(self, init=False):
    """
    Uses Gibbs sampling to draw a single sample from the posterior
    distribution over token--component (i.e., token--topic) assignments
    given this instance's corpus (i.e., document tokens).

    By default (i.e., if keyword argument 'init' is set to the value
    'False') all token--component assignments (and corresponding counts)
    are assumed to have been initialized previously; otherwise, they are
    initialized.

    Keyword arguments:

    init -- whether to initialize token--component assignments
    """
    corpus = self.corpus
    Nvt_plus_beta_n = self.Nvt_plus_beta_n
    Nt_plus_beta = self.Nt_plus_beta
    Ntd_plus_alpha_m = self.Ntd_plus_alpha_m
    Nd_plus_alpha = self.Nd_plus_alpha
    z = self.z

    for d, (doc, zd) in enumerate(iterview(zip(corpus, z), inc=200)):
        for n, (v, t) in enumerate(zip(doc.w, zd)):
            if not init:
                Nvt_plus_beta_n[v, t] -= 1
                Nt_plus_beta[t] -= 1
                Ntd_plus_alpha_m[d, t] -= 1
            t = sample((Nvt_plus_beta_n[v, :] / Nt_plus_beta) *
                       Ntd_plus_alpha_m[d, :])
            Nvt_plus_beta_n[v, t] += 1
            Nt_plus_beta[t] += 1
            Ntd_plus_alpha_m[d, t] += 1
            if init:
                Nd_plus_alpha[d] += 1
            zd[n] = t
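# The Gibbs sweep above (and log_predictive_prob below) draws a topic index
# from unnormalized per-topic weights. A sketch of the categorical
# `sample(weights)` helper it assumes, using numpy since the counts are
# numpy arrays; the original helper may be implemented differently.
import numpy as np

def sample(weights):
    """Draw index t with probability proportional to weights[t]."""
    cdf = np.cumsum(weights)
    return int(np.searchsorted(cdf, np.random.uniform(0, cdf[-1])))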
def mplot2d(f, var, show=True):
    """
    Plot a 2d function using matplotlib/Tk.
    """
    import warnings
    warnings.filterwarnings("ignore", "Could not match \S")

    p = import_module('pylab')
    if not p:
        sys.exit("Matplotlib is required to use mplot2d.")

    if not ordered_iter(f):
        f = [f, ]

    for f_i in f:
        x, y = sample(f_i, var)
        p.plot(x, y)

    p.draw()
    if show:
        p.show()
def mplot2d(f, var, show=True):
    """
    Plot a 2d function using matplotlib/Tk.
    """
    import warnings
    warnings.filterwarnings("ignore", "Could not match \S")

    try:
        import pylab as p
    except ImportError:
        raise ImportError("Matplotlib is required to use mplot2d.")

    if not ordered_iter(f):
        f = [f, ]

    for f_i in f:
        x, y = sample(f_i, var)
        p.plot(x, y)

    p.draw()
    if show:
        p.show()
def processConfigFile(confFileSuffix):
    configFile = open('config' + confFileSuffix + '.cfg', 'r')
    for tmpline in configFile:
        line = tmpline.split()
        if line == []:
            continue
        if line[0] == 'STOP':
            break
        if not (line[0] == 'charm' or line[0] == 'beauty'):
            # to enable any comments in the config file
            continue
        Flavour = line[0]
        SubProcess = line[1]
        Q2Min = line[2]
        Luminosity = line[3]
        Trigger = line[4]
        print Flavour + ', subprocess: ' + SubProcess + ', Q^2> ' + Q2Min + ' GeV^2'
        x = sample(Flavour, Q2Min, SubProcess, float(Luminosity), Trigger)
        x._NumberOfEventsCrossSect = NumberOfEventsCrossSect
        # For each entry in the config file there will be a separate
        # submission letter file. In addition, the content of this submission
        # letter will be appended to the file named by this variable (created
        # if it does not exist). There will be one such file per config file.
        x._LetterToAppend = 'submit.letter' + confFileSuffix
        x._OutputPathPrefix = getOutputPathPrefix(confFileSuffix)
        x._generate()
        x._InfoForWeb(webInfoFile)
from sample import sample
import numpy as np

# x = np.empty(10)
x = sample(10, 2)
print(x)
def test_cab(self):
    self.assertEqual(1, sample(3, 1, 2))
def classFactory(iface):
    # load the sample class from the file sample
    from sample import sample
    return sample(iface)
def log_predictive_prob(self, new_corpus, num_samples):
    """
    Returns an approximation of the log probability of the specified new
    corpus given this instance's corpus (i.e., document tokens) AND current
    set of token--component (i.e., token--topic) assignments according to
    LDA.

    Arguments:

    new_corpus -- new corpus of documents
    num_samples -- ...
    """
    V, T = self.V, self.T
    D_new = len(new_corpus)
    alpha, alpha_m = self.alpha, self.alpha_m
    Nvt_plus_beta_n = self.Nvt_plus_beta_n
    Nt_plus_beta = self.Nt_plus_beta

    Nvt_new, Nt_new, Ntd_new, z_new = [], [], [], []
    for r in xrange(num_samples):
        Nvt_new.append(zeros((V, T), dtype=int))
        Nt_new.append(zeros(T, dtype=int))
        Ntd_new.append(zeros((D_new, T), dtype=int))
        z_r = []
        for doc in new_corpus:
            z_r.append(zeros(len(doc), dtype=int))
        z_new.append(z_r)

    log_p = 0

    for d, doc in enumerate(iterview(new_corpus)):
        for n, v in enumerate(doc.w):
            tmp = zeros(num_samples, dtype=float)
            for r in xrange(num_samples):
                # for efficiency, resample only those token--component
                # assignments belonging to previous tokens in the current
                # document
                for prev_n in xrange(0, n):
                    prev_v = doc.w[prev_n]
                    t = z_new[r][d][prev_n]
                    Nvt_new[r][prev_v, t] -= 1
                    Nt_new[r][t] -= 1
                    Ntd_new[r][d, t] -= 1
                    t = sample((Nvt_new[r][prev_v, :] +
                                Nvt_plus_beta_n[prev_v, :]) /
                               (Nt_new[r] + Nt_plus_beta) *
                               (Ntd_new[r][d, :] + alpha_m))
                    Nvt_new[r][prev_v, t] += 1
                    Nt_new[r][t] += 1
                    Ntd_new[r][d, t] += 1
                    z_new[r][d][prev_n] = t

                pass  # YOUR CODE GOES HERE

                Nvt_new[r][v, t] += 1
                Nt_new[r][t] += 1
                Ntd_new[r][d, t] += 1
                z_new[r][d][n] = t

            log_p += log_sum_exp(tmp) - log(num_samples)

    return log_p
def sample(self):
    if len(self.noteQueue) == 0:
        self.noteQueue.extend(lstm.sample(40, 1))
    next_note = self.noteQueue[0]
    self.noteQueue = self.noteQueue[1:]  # shift off the first element
    return next_note
def test_abc(self):
    self.assertEqual(1, sample(1, 2, 3))
def test_acb(self):
    self.assertEqual(1, sample(1, 3, 2))
def test_bac(self):
    self.assertEqual(1, sample(2, 1, 3))
def test_bca(self):
    self.assertEqual(1, sample(2, 3, 1))
def __init__(self):
    self.lastNotePlayed = None
    self.noteQueue = lstm.sample(40, 1)
    self.bpm = 120
def test_cba(self):
    self.assertEqual(1, sample(3, 2, 1))