def index(request):
    plots = defaultdict(list)
    with open('/home/sachin/mysite/plots/static/plots/plots.pickle', 'rb') as handle:
        plots = pickle.load(handle)
    with open('/home/sachin/mysite/plots/static/plots/scores.pickle', 'rb') as handle:
        scores = pickle.load(handle)

    if request.method == 'POST':
        if not request.FILES:
            f = 'wine.csv'
        else:
            f = request.FILES['myfile']
            fs = request.FILES['myschema']
            with open('file.csv', 'wb+') as destination:
                for chunk in f.chunks():
                    destination.write(chunk)
            with open('schema.csv', 'wb+') as destination:
                for chunk in fs.chunks():
                    destination.write(chunk)
        Generate.main("schema.csv", "prototype.csv")
        Groupby.main("file.csv", "schema.csv")
        Genplots.main("file.csv", "experiment.csv", "groups.csv")
        # Materialize as a list of strings so the template can iterate it more than once
        p = [str(i) for i in range(settings.count)]
        return render_to_response("plots/index.html",
                                  {'plots': p, 'filename': f})

    if request.method == 'GET':  # The search form was submitted
        f = ''
        search_query = request.GET.get('search_box', None)
        X = plots[str(search_query).lower()]
        T = [scores[i] for i in X]
        T = [0 if np.isnan(x) else x for x in T]
        # Sort plot names by score, highest first
        Y = [str(x) for (t, x) in sorted(zip(T, X), reverse=True)]
        T = sorted(T, reverse=True)
        return render_to_response('plots/index.html',
                                  {'plots_scores': list(zip(Y, T)), 'filename': f})
def test_generate_relative(self):
    sys.argv = [
        sys.argv[0],
        '--seed', '0',
        '--player_files_path', str(self.rel_input_dir),
        '--outputpath', self.output_tempdir.name,
    ]
    print(f'Testing Generate.py {sys.argv} in {os.getcwd()}')
    Generate.main()
    self.assertOutput(self.output_tempdir.name)
def CreateTable(self):
    Extras.Load(0)
    time.sleep(0.5)
    Extras.Load(10)
    time.sleep(0.5)
    Extras.Load(20)
    time.sleep(0.5)
    gen = Generate(self.file)
    Extras.Load(50)
    time.sleep(0.5)
    self.notas = self.cria_event(gen.generateit())
    Extras.Load(100)
def MRA_StandardNormal(N, L, K, sigma):
    x = np.zeros((K, L))
    # Generate standard normally distributed signals
    for k in range(K):
        x[k] = np.random.standard_normal(L)
        # Normalize the signal to zero mean and unit 2-norm
        x[k] = (x[k] - np.mean(x[k])) / np.linalg.norm(x[k] - np.mean(x[k]), 2)
    y, true_partition = Generate.generate_MRA(N, K, L, sigma, x)
    max_corr = Generate.generate_maxcorr(N, L, y)
    return y, max_corr, true_partition
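A minimal usage sketch for the helper above; the parameter values are illustrative and assume numpy is imported as np and Generate is importable as in the snippet:

# Illustrative call: 30 observations of 2 unit-norm signals of length 50, noise level 0.2.
y, max_corr, true_partition = MRA_StandardNormal(N=30, L=50, K=2, sigma=0.2)
print(len(true_partition))  # one partition label per observation (assumed layout)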
def MRA_CorrelatedNormal(N, L, K, a, b, choice, sigma):
    x = np.zeros((K, L))
    # Generate correlated normally distributed signals
    for k in range(K):
        x[k] = generate_a_signal(L, a, b, choice)
        # Normalize the signal to zero mean and unit 2-norm
        x[k] = (x[k] - np.mean(x[k])) / np.linalg.norm(x[k] - np.mean(x[k]), 2)
    y, true_partition = Generate.generate_MRA(N, K, L, sigma, x)
    max_corr = Generate.generate_maxcorr(N, L, y)
    G = Generate.generate_graph(max_corr, true_partition)
    return G, true_partition
def load_state(self, path):
    with open(path, "rb") as savefile:
        save_data = pickle.load(savefile)
    self.player = save_data["player"]
    self.seed = save_data["seed"]
    Generate.setup(self.seed)
    player_chunk = Convert.world_to_chunk(self.player.pos[0])[1]
    self.loaded_chunks = TwoWayList.TwoWayList()
    self.load_chunks(player_chunk)
    self.player.load_image()
    for row in self.player.inventory:
        for item in row:
            if item is not None:
                item.load_image()
def Apply_Policy_To_Random_Hypo(hypo_subset, number_features,
                                state_action_label_value_map):
    is_end = False
    true_hypothesis = Generate.Get_Hypo(hypo_subset)
    hypo_remaining_set = hypo_subset
    feature_remaining_set = list(range(number_features))
    feature_trajectory = []
    current_feature = -1
    current_label = -1
    while not is_end:
        next_feature = Select.MonteCarlo_Select(
            feature_remaining_set, current_feature, current_label,
            state_action_label_value_map)
        Select.Erase_Feature(feature_remaining_set, next_feature)
        hypo_remaining_set = Observe.Observe_Subset(
            true_hypothesis, hypo_remaining_set, next_feature)
        Observe.Clear_Overlap(feature_remaining_set, hypo_remaining_set)
        is_end = Observe.Check_End(hypo_remaining_set)
        feature_trajectory.append(next_feature)
        current_label = true_hypothesis[next_feature]
        current_feature = next_feature
    return feature_trajectory
def test_generate_yaml(self):
    # Override host.yaml
    defaults = Utils.get_options()["generator"]
    defaults["player_files_path"] = str(self.yaml_input_dir)
    defaults["players"] = 0
    sys.argv = [
        sys.argv[0],
        '--seed', '0',
        '--outputpath', self.output_tempdir.name,
    ]
    print(f'Testing Generate.py {sys.argv} in {os.getcwd()}, '
          f'player_files_path={self.yaml_input_dir}')
    Generate.main()
    self.assertOutput(self.output_tempdir.name)
def make_parser(self, debug_level = 0):
    import Generate, RecordReader
    want = 0
    if self.header_expression is not None:
        header_tagtable, want_flg, attrlookup = \
            Generate.generate(self.header_expression, debug_level = debug_level)
        make_header_reader = self.make_header_reader
        header_args = self.header_args
    else:
        header_tagtable = ()
        want_flg = 0
        attrlookup = {}
        make_header_reader = None
        header_args = None
    # Accumulate whether any sub-expression wants callbacks
    want = want or want_flg

    record_tagtable, want_flg, tmp_attrlookup = \
        Generate.generate(self.record_expression, debug_level = debug_level)
    make_record_reader = self.make_record_reader
    record_args = self.record_args
    attrlookup.update(tmp_attrlookup)
    want = want or want_flg

    if self.footer_expression is not None:
        footer_tagtable, want_flg, tmp_attrlookup = \
            Generate.generate(self.footer_expression, debug_level = debug_level)
        make_footer_reader = self.make_footer_reader
        footer_args = self.footer_args
        attrlookup.update(tmp_attrlookup)
    else:
        footer_tagtable = ()
        want_flg = 0
        make_footer_reader = None
        footer_args = None
    want = want or want_flg

    return Parser.HeaderFooterParser(
        self.format_name, self.attrs,
        make_header_reader, header_args, header_tagtable,
        make_record_reader, record_args, record_tagtable,
        make_footer_reader, footer_args, footer_tagtable,
        (want, debug_level, attrlookup))
def make_parser(self, debug_level = 0):
    import Generate
    tagtable, want_flg, attrlookup = Generate.generate(
        self.record_expression, debug_level)
    return Parser.RecordParser(self.format_name, self.attrs, tagtable,
                               (want_flg, debug_level, attrlookup),
                               self.make_reader, self.reader_args)
def Reset(self):
    self.true_hypothesis = Generate.Get_Hypo(self.hypo_subset)
    self.feature_remaining = []
    self.feature_trajectory = []
    self.state_list = []
    self.hypo_remaining_set = copy.deepcopy(self.hypo_subset)
    for f in range(self.num_feature):
        self.feature_remaining.append(f)
def MRA_Rect_Trian(N, L, K, sigma):
    x = np.zeros((K, L))
    # Generate a rectangle at x[0]
    for l in range(int(L / 4)):
        x[0][l] = 1
    # Normalize the signal to zero mean and unit 2-norm
    x[0] = (x[0] - np.mean(x[0])) / np.linalg.norm(x[0] - np.mean(x[0]), 2)
    # Generate a triangle at x[1]
    x[1] = signal.triang(L)
    x[1] = (x[1] - np.mean(x[1])) / np.linalg.norm(x[1] - np.mean(x[1]), 2)
    y, true_partition = Generate.generate_MRA(N, K, L, sigma, x)
    max_corr = Generate.generate_maxcorr(N, L, y)
    return y, max_corr, true_partition
def index(request):
    if request.is_ajax():
        schema = request.POST.get('schema', '')
        sdict = ast.literal_eval(schema)
        with open('schema.csv', 'wt+') as destination:
            csvwriter = csv.writer(destination)
            csvwriter.writerow(["name", "type"])
            for key, value in sdict.items():
                csvwriter.writerow([key, value])
        with open('file.txt', 'r') as f:
            filename = f.readlines()
        Generate.main("schema.csv", "prototype.csv")
        Groupby.main(filename[0], "schema.csv")
        Genplots.main(filename[0], "experiment.csv", "groups.csv")
        return HttpResponse([])
    elif request.method == 'POST':
        settings.count = 0
        if not request.FILES:
            plotdata = PlotData.objects.all()
            return render_to_response("index.html", {'plotdata': plotdata})
        f = request.FILES['myfile']
        with open('file.txt', 'w') as dest:
            dest.write(f.name)
        with open(f.name, 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        with codecs.open(f.name, 'r', encoding="utf-8") as f:
            d_reader = csv.DictReader(f)
            headers = d_reader.fieldnames
        plotdata = PlotData.objects.all()
        return render_to_response("index.html",
                                  {'names': headers, 'plotdata': plotdata})
    plotdata = PlotData.objects.all()
    return render_to_response("index.html", {'plotdata': plotdata})
def tweet(seed=""):
    config = get_config()
    oracle = twitter.Api(consumer_key=config['consumer_key'],
                         consumer_secret=config['consumer_secret'],
                         access_token_key=config['access_token_key'],
                         access_token_secret=config['access_token_secret'])
    wisdom = Generate.sample(seed)
    status = oracle.PostUpdate(wisdom)
    return status.text
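A hedged driver for the function above; get_config is assumed to return the four Twitter credentials used in the snippet, and the seed value is illustrative:

# Illustrative only: post one generated status and echo it.
if __name__ == "__main__":
    posted = tweet(seed="")
    print("Posted:", posted)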
def __init__(self, number_features=4, number_labels=2):
    self.num_feature = number_features
    self.num_label = number_labels
    self.hypo_superset = Generate.Gen_Superset(number_features, number_labels)
    self.hypo_subset = []
    self.hypo_remaining_set = []
    self.feature_remaining = []
    self.true_hypothesis = []
    self.prob_map = {}
    self.state_action_label_value_map = {}
def __init__(self):
    generate = Generate.Generate()
    # Constants
    self.number_threads = 22
    self.number_iteration = 1000000
    self.Qf = 20
    self.nList = generate.n(20, 130, 5)
    self.sigmas = generate.sigmas(0.0, 2.0, 0.05)
    self.resultado2 = zeros((len(self.sigmas), len(self.nList)))
    self.resultado10 = zeros((len(self.sigmas), len(self.nList)))
def main():
    api = Twit.create_twitter_api()
    try:
        api.verify_credentials()
        print("Authentication OK")
    except Exception:
        print("Error during authentication")
        return
    while True:
        tweet = Generate.generate_random_statement()
        print(tweet)
        status = api.update_status(tweet)
        print(status.id)
        time.sleep(delay)
def dyna_gratings(speed, noise=True, reverse=False, generate_angle_file=True,
                  file_index=0):
    stim_list = []
    random_angles_path = exp.prepFolder + 'random_angles.csv'
    if generate_angle_file:
        Generate.generate_angles(speed, random_angles_path, n_angles=16)
    angles, speeds = np.loadtxt(random_angles_path)[:, file_index:].astype(int)
    for speed, angle in zip(speeds, angles):
        a0, a1 = bars(speed, reverse)
        dgrating = Stimulus.StimulusParameters()
        dgrating.filename = (str(a0) + "to" + str(a1) + "algrating" +
                             str(speed) + noise * '_noise5' + ".mat")
        dgrating.savevideo = "1"
        dgrating.externaltrigger = "1"
        dgrating.repeatstim = "0"
        dgrating.framelength = str(speed)
        dgrating.angle = str(angle)
        stim_list.append(dgrating)
    return stim_list
def generate_and_build():
    """Returns 0 on success, non-0 on failure."""
    generate_result = Generate.generate()
    if generate_result != 0:
        print("Generate failed with return value '{}'".format(generate_result))
        return generate_result
    build_result = Build.build()
    if build_result != 0:
        print("Build failed with return value '{}'".format(build_result))
        return build_result
    return 0
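Since the function already returns shell-style status codes, a natural entry point (assumed, not shown in the source) is:

import sys

if __name__ == "__main__":
    # Propagate the non-zero failure code to the calling shell.
    sys.exit(generate_and_build())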
def Init_Subset(self, length=1, user_subset=None, show_superset=False,
                show_subset=True):
    # user_subset=None avoids a mutable default argument
    if not user_subset:
        self.hypo_subset = Generate.Gen_Subset(self.hypo_superset, length)
    else:
        self.hypo_subset = user_subset
    if show_superset:
        Report.Print_Set(self.hypo_superset)
        print("----Superset----")
    if show_subset:
        Report.Print_Set(self.hypo_subset)
        print("----Subset----")
def gen(width, height, screen):
    xs = list(range(width))
    random.shuffle(xs)
    for x in xs:
        for y in range(height):
            value = Generate.terrain((x, y), (50, 150))[0]
            w = 128 + 127 * value
            color = (w, w, w)
            if value > -0.5:
                color = (255, w, w)
            else:
                color = (w, w, 255)
            # screen.set_at((x, y), color)
            pygame.draw.rect(screen, color, (x * 2, y * 2, 2, 2))
        pygame.display.update()
    print("Generation complete")
def populate(self):
    # Fill in the blocks of this chunk
    for y in range(len(self.foreground_blocks)):
        for x in range(len(self.foreground_blocks[y])):
            if y < World.SEA_LEVEL:
                self.set_blocks_at(x, y, World.get_block("air"))
            else:
                world_x = Convert.chunk_to_world(x, self)
                noise = Generate.terrain(
                    (world_x, y),
                    (self.biome["maxelevation"], self.biome["minelevation"]))
                self.set_blocks_from_noise(x, y, noise[0], False)
                self.set_blocks_from_noise(x, y, noise[1], True)
    self.decorate()
def determine_layout():
    box_layout = {}
    for face_name in faces_names:
        try:
            with open('input/layout' + face_name + '.json', 'r') as load_f:
                layouts = json.load(load_f)
            temp = np.random.choice(layouts)
            # temp = layouts[-1]
            face_layout = {}
            for key, ele in temp.items():
                face_layout[ele["type"]] = Generate.Location(
                    ele["position"]["top"], ele["position"]["bottom"],
                    ele["position"]["left"], ele["position"]["right"])
                if ele["type"] in ["title", "slogan", "txt"]:
                    face_layout[ele["type"]].font_setting(ele["FontSetting"])
                if ele["type"] in ["title", "slogan"]:
                    choice = np.random.choice(["bold", "normal"])
                    face_layout[ele["type"]].font_set["fontWeight"] = choice
            box_layout[face_name] = face_layout
        except FileNotFoundError:
            pass
    return box_layout
def design_face(self, key="F", inputs={}):
    """
    :param key: face identifier
    :param inputs: mapping of element type to its input elements
    """
    face_size = src.get_face_size(self.face_data[key])
    face_loc = src.get_face_loc(self.face_data[key])
    face = Generate.Design(face_size, face_loc)
    layers = new_list(len(element_types))
    for ele_type, input in inputs.items():
        rank = element_types.index(ele_type)
        temp_layer = eval(layer_types[rank] + "(input)")
        layers[rank].append(temp_layer)
    # Flatten the per-rank lists into one ordered layer list
    layers = [i for item in layers for i in item]
    for layer in layers:
        face.insert_layer(layer)
    face.load_layout_b(self.layout[key], self.safe, face_size, face_loc)
    face.implement_palette(self.color_palette["1"])
    self.faces[key] = face
import AL
import Generate
import Const

# hypo = Generate.Uniform_Hypo_Table(1, False)
'''
task = AL.ActiveLearning(knowledgeability=1)
Generate.Transfer_User_Table(Const.user_hypo_table, Const.label_map)
print(Const.user_hypo_table)
task.Set(user_hypo=Const.user_hypo_table)
task.O_Task()
'''

task = AL.ActiveLearning(knowledgeability=1)
task.Set(user_hypo=Generate.Boundary_Hypo_Table(4, True))
task.DS_Task()
import Iter
import Generate
import numpy as np

np.set_printoptions(suppress=True)

matrix, b = Generate.Generate(5, 1.2)
eps = 0.000001

print(" matrix:")
print(matrix)
print(" b:")
print(b)

acc = np.linalg.solve(matrix, b)
print("\n numpy.linalg.solve: ")
print(acc)

print("\n Jacobi:")
x, steps = Iter.Jacobi(matrix, b, eps)
print(x)
print(steps)
for i in range(len(x)):
    print(x[i] - acc[i])

print("\n Seidel:")
x, steps = Iter.Seidel(matrix, b, eps)
print(x)
print(steps)
for i in range(len(x)):
    print(x[i] - acc[i])
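The Iter module itself is not shown. For reference, a self-contained sketch of a Jacobi iteration consistent with the Iter.Jacobi(matrix, b, eps) call above (an illustration, not the project's actual implementation):

import numpy as np

def jacobi_sketch(A, b, eps, max_steps=10000):
    """Jacobi iteration: x_{k+1} = D^{-1} (b - R x_k), where R = A minus its diagonal."""
    x = np.zeros_like(b, dtype=float)
    D = np.diag(A)              # diagonal entries of A
    R = A - np.diagflat(D)      # off-diagonal part of A
    for step in range(1, max_steps + 1):
        x_new = (b - R @ x) / D
        if np.linalg.norm(x_new - x, ord=np.inf) < eps:
            return x_new, step
        x = x_new
    return x, max_steps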
def char(c):
    return Generate.mk_first_match_rule(c)


def text(s):
    return Generate.StringRule(s)
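How these wrappers combine depends on Generate's rule objects, which are not shown here; construction alone might look like this (the rule names are hypothetical):

# Hypothetical: build a single-character rule and a literal-string rule.
digit_zero = char("0")
begin_kw = text("BEGIN")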
import Generate
import numpy
import copy

num_feature = 4
hypo_table = Generate.Boundary_Hypo_Table(num_feature)
num_hypo = len(hypo_table)

# Random permutation of the feature indices
x = numpy.array(range(num_feature))
for a in range(num_feature):
    t = numpy.random.randint(0, num_feature)
    x[t], x[a] = x[a], x[t]
print(x)

observation_steps = 2  # How many observations we want
k_matrix = numpy.zeros((num_hypo, num_hypo))

for tr in range(num_hypo):
    # True hypothesis
    true_hypo = hypo_table[tr]
    print(true_hypo)
    temp = list(range(num_hypo))
    print("temp", temp)
    for idx in range(observation_steps):
        # Search for the matching
        c_idx = x[idx]  # The current feature index
        c_label = true_hypo[c_idx]
        for i in range(num_hypo):
            if c_label != hypo_table[i][c_idx]:
def main(argv):
    CreateSchema.main(argv)
    Generate.main("Schema.csv", "prototype.csv")
    Groupby.main(argv, "Schema.csv")
    Genplots.main(argv, "experiment.csv", "groups.csv")
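A plausible command-line wrapper for this pipeline, assuming argv is the path of the input CSV (the source does not show how main is invoked):

import sys

if __name__ == "__main__":
    # e.g. python pipeline.py data.csv  -- 'pipeline.py' is a placeholder name
    main(sys.argv[1])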
import Generate
import numpy
import tensorflow
import time
import copy

# ===============================
# ====== [Hyperparameters] ======
# ===============================
num_feature = 20
num_label = 2
knowledgeability = 1
iteration = 100

hypo_matrix = Generate.Boundary_Hypo_Table(num_feature, True)
num_hypo = len(hypo_matrix)
ptxy = 1 / num_feature / num_label

# ===============================
# ====== [Numpy Matrix] =========
# ====== [Memory Usage Note] ====
# ====== [1000 Features] ========
# ====== [About 300 MB RAM] =====
# ===============================

# P(y|x,h) matrix
P_y_xh = numpy.empty((num_label, num_feature, num_hypo), dtype="float32")

# Knowledgeability matrix
Delta_g_h = numpy.zeros((num_hypo, num_hypo), dtype="float32")
import numpy as np
import Generate

a = Generate.generate1()
for i in range(0, 1000, 1):
    print(i)
    np.save("/disk3/Graduate-design/data/{:0>3d}.npy".format(i),
            np.stack([next(a) for x in range(1000)]))
def make_parser(self, debug_level = 0):
    """Create a SAX compliant parser for this regexp"""
    import Generate
    tagtable, want_flg, attrlookup = Generate.generate(self, debug_level)
    return Parser.Parser(tagtable, (want_flg, debug_level, attrlookup))
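The docstring promises a SAX compliant parser, so usage presumably follows the standard ContentHandler pattern; a sketch under that assumption (the expression object and handler are illustrative):

import xml.sax.handler

class TagCounter(xml.sax.handler.ContentHandler):
    """Counts start events emitted by the generated parser."""
    def __init__(self):
        self.count = 0
    def startElement(self, name, attrs):
        self.count += 1

# 'expression' stands in for a regexp object exposing make_parser().
parser = expression.make_parser()
parser.setContentHandler(TagCounter())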
import matplotlib.pyplot as plt

# Parameters
N = 30       # Number of observations
L = 50       # Signal length
K = 2        # Number of signals
sigma = 0.2  # Noise level

x = np.zeros((K, L))
# Generate standard normally distributed signals
for k in range(K):
    x[k] = np.random.standard_normal(L)
    # Normalize the signal to zero mean and unit 2-norm
    x[k] = (x[k] - np.mean(x[k])) / np.linalg.norm(x[k] - np.mean(x[k]), 2)

y, true_partition = Generate.generate_MRA(N, K, L, sigma, x)
max_corr = Generate.generate_maxcorr(N, L, y)
G = Generate.generate_graph(max_corr, true_partition)

edges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())
pos = nx.spring_layout(G)
plt.title("Standard Normal Gaussian MRA samples")
nx.draw(G, pos, node_color=true_partition, node_size=20, edgelist=edges,
        edge_color=weights, width=1, cmap=plt.cm.jet, edge_cmap=plt.cm.Greens)
window = pygame.display.set_mode(sizes, pygame.FULLSCREEN)
clock = pygame.time.Clock()

map_size = 500
plane_size = 25
rotate_to_zero = -45
font1_size = 40
font2_size = 25
logo_size = 125
zoom_size = 50
cnt = 0
minutes = 60
display1 = 1060
display2 = 1060

flights = Generate.flights
data = Generate.data
time = Generate.real_time()  # Gets the current time from modules
current_time = int(time[11:][:2]) * minutes + int(time[11:][3:])  # Converts to minutes

active = []
result = []
ind_flight = []
dest_flight = []
words1 = ""
words2 = ""
bounds1 = False
bounds2 = False
first_box = False
second_box = False
left_box = True
result1 = False
result2 = False
ind_left = False
# Tree Model
import numpy
import Utility
import copy
import Generate

# Generate the hypothesis matrix
total_features = 4
hypo = Generate.Boundary_Hypo_Table(total_features, True)
total_hypos = len(hypo)

# Create the observation list
arr = ""
for x in range(total_features):
    arr += str(x)
obs_list = numpy.array(list(Utility.Permutation(arr)), dtype=int)
lst_size = len(obs_list)
print(obs_list)

best_route = {}
# Count how many observations
counting = 0

for true_idx in range(total_hypos):
    # Create the true hypo
    true_hypo = hypo[true_idx]
    print("True hypothesis = ", true_hypo)
    maximum = 0
if __name__ == "__main__":
    width = 100
    height = World.HEIGHT
    pygame.init()
    screen = pygame.display.set_mode((width * 2, height * 2))
    Generate.setup(100)
    gen(width, height, screen)
    while True:
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit(0)
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    sys.exit(0)
                elif event.key == pygame.K_SPACE:
                    # Re-seed the noise and regenerate the terrain preview
                    Generate.noise2d = perlin.PerlinNoiseFactory(2, octaves=3)
                    gen(width, height, screen)
def generate(self, seed, player_options):
    os.makedirs(self.dir)
    self.player = Player.Player([0, 140], player_options)
    self.seed = seed
    Generate.setup(seed)
    self.generate_spawn()
            [loss_MSE, loss_SAD],
            feed_dict={
                F: F_test,
                B: B_test,
                I: I_test,
                alpha_diff: alpha_diff_target_test
            })
        saver.save(sess, saver_file)
else:
    with tf.Session(config=config) as sess:
        # Restore the parameters from the latest checkpoint under saver_path
        saver.restore(sess, tf.train.latest_checkpoint(saver_path))
        batch = Generate.next(batch_size)
        F_train = np.array([x['F'] for x in batch])
        B_train = np.array([x['B'] for x in batch])
        I_train = np.array([x['I'] for x in batch])
        alpha_diff_target = np.array(
            [x['alpha_diff'] for x in batch]).reshape([-1, 1])
        # Dump the graph's node names for inspection
        for v in [n.name for n in tf.get_default_graph().as_graph_def().node]:
            print(v)
        print(sess.run(
            tf.get_default_graph().get_tensor_by_name("loss_MSE:0"),
            feed_dict={
                F: F_train,
                B: B_train,
                I: I_train,
class limerick:
    def __init__(self):
        self.gen = Generate(wv_file='./storyline_for_reference/glove.6B.300d.word2vec.txt')
        self.mp = Meta_Poetry_Glove(wv_file='./storyline_for_reference/glove.6B.300d.word2vec.txt')
        # Set of part-of-speech tags
        with open('postag_dict_all.p', 'rb') as f:
            postag_dict = pickle.load(f)
        self.postag = postag_dict[2]

    def gen_limerick(self, word, templates_dataset=None):
        # Get the set of templates
        if templates_dataset is None:
            dataset, second_line, third_line, last_two = get_templates_new()  # function in functions.py
        # Get the five storyline words
        words = self.mp.get_five_words(word)[1:]
        print('Five words are: ', words)
        if not self.gen.in_vocab(words):
            print('Words not in vocab')
            return None
        # Get the POS tag of the four words
        postag_words = []
        for x in words:
            postag_words.append(self.postag[x][0])
        print(postag_words)
        # Pick templates by POS, unless a template set was supplied
        if templates_dataset is None:
            try:
                template_2 = random.choice(second_line[postag_words[0]])
                template_3 = random.choice(third_line[postag_words[1]])
                template_4 = random.choice(dataset[postag_words[2]])
                template_5 = random.choice(dataset[postag_words[3]])
            except KeyError:
                print('POS not in set of templates')
                return None
        else:
            template_2, template_3, template_4, template_5 = templates_dataset
        # Second line
        if type(template_2) == tuple:
            print(template_2)
            template_2 = template_2[0]
        line_2 = self.gen.genPoem_backward(words[0], template_2)
        # Third line
        if type(template_3) == tuple:
            print(template_3)
            template_3 = template_3[0]
        line_3 = self.gen.genPoem_backward(words[1], template_3)
        # Fourth line
        if type(template_4) == tuple:
            print(template_4)
            template_4 = template_4[0]
        line_4 = self.gen.genPoem_backward(words[2], template_4)
        # Fifth line
        if type(template_5) == tuple:
            print(template_5)
            template_5 = template_5[0]
        line_5 = self.gen.fifth_line(line_4[0][1][1], words[-1], template_5)
        print(template_2)
        print(template_3)
        print(template_4)
        print(template_5)
        print('*************\n')
        print('\n' + ' '.join(line_2[0][1][1]))
        print(' '.join(line_3[0][1][1]))
        print(' '.join(line_4[0][1][1]))
        print(' '.join(line_5[0][1][1][1:]))
def main():
    parser = argparse.ArgumentParser(
        description="Train Midi files on an LSTM. "
                    "Note: Weights will be saved after every Epoch")
    parser.add_argument('-M', '--Model', default='A', type=str,
                        help="Model A or B (Default: Model 'A')")
    parser.add_argument('-W', '--Weights', default=None, type=str,
                        help="Load weights to continue training (Default: None)")
    parser.add_argument('-D', '--Directory', default='.', type=str,
                        help="Directory of Midi Files (Default: '.')")
    parser.add_argument('-E', '--Epochs', default=100, type=int,
                        help="Number of Epochs (Default: 100)")
    parser.add_argument('-O', '--Outputs', default=1, type=int,
                        help="Number of Generated Output(s) (Default: 1)")
    parser.add_argument('-BS', '--Batch_Size', default=128, type=int,
                        help="Batch Size (Default: 128)")
    parser.add_argument('-SL', '--Sequence_Length', default=100, type=int,
                        help="Sequence Length (Default: 100)")
    args = parser.parse_args()

    # You may edit these variables if you are using Anaconda
    model = args.Model
    weights = args.Weights
    directory = args.Directory
    num_epochs = args.Epochs
    num_outputs = args.Outputs
    batch_size = args.Batch_Size
    sequence_length = args.Sequence_Length

    # Initialize the training neural network
    train_NN = Train.Train(directory, num_epochs, batch_size,
                           sequence_length, model, weights)
    # Train the neural network
    train_NN.train_network()

    # Get the name of the last edited/created file in the current directory
    new_weights = last_generated_weights()
    print("Generating...")
    print("Generating for {}".format(new_weights))

    # Initialize the generation neural network
    gen = Generate.Generate(sequence_length, model, new_weights, num_outputs)
    # Generate music
    gen.generate_music()
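For reference, a typical invocation using the flags defined above (the script filename is a placeholder):

# Example (shell):
#   python train.py -D ./midi_files -E 50 -O 2 -BS 64 -SL 100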