def __init__(self):
    # Configure the source API.
    self.__crawlerReportURL = utils.read_config('reportAPI')
    self.__crawlerMapURL = utils.read_config('mapURL')
    self.__crawlerAvalancheURL = utils.read_config('avalancheURL')
    if len(self.__crawlerReportURL) == 0:
        raise ValueError("Empty report API URL in the configuration file!")
    # Configure the crawler.
    if sys.platform.startswith("linux"):  # "linux2" on Python 2, "linux" on Python 3
        chromedriver = utils.get_project_full_path() + "/bin/chromedriver_linux"  # This is the Linux driver.
    else:
        chromedriver = utils.get_project_full_path() + "/bin/chromedriver_osx"  # Assume OS X otherwise; Windows is not supported.
    os.environ["webdriver.chrome.driver"] = chromedriver
    self._crawlerViewDriver = webdriver.Chrome(chromedriver)
    self._crawlerViewDriver.implicitly_wait(4)
    self._crawlerViewDriver.set_page_load_timeout(8)
    # Configure the DB interface.
    dbFile = utils.get_project_full_path() + utils.read_config('dbFile')
    self._DBManager = db_manager.CrawlerDB(dbFile)
def main(args):
    dataset = read_config('pivots.json')
    config = read_config('config.json')
    bucket = boto3.resource('s3').Bucket(config['S3_BUCKET_DATASETS'])
    df = pd.read_csv(args.input_file)
    # Group columns by dtype so the numeric sentinel value can be nulled out.
    # (The original left `types` as an empty dict, which made the loop below a
    # no-op; reconstructed here from the DataFrame's dtypes as an assumption.)
    types = {str(dtype): list(cols)
             for dtype, cols in df.columns.to_series().groupby(df.dtypes).groups.items()}
    for t, columns in types.items():
        if t in ('int64', 'float64'):
            for c in columns:
                df[c] = df[c].replace({99999: None})
    df['STAT_PROFILE_DATE_MONTH'] = df.apply(
        lambda row: '%s-%s' % (row['STAT_PROFILE_DATE_YEAR'], str(row['MONTHS']).zfill(2)),
        axis=1)
    for c in dataset['columns']:
        for r in dataset['rows']:
            if r == c:
                continue
            for func, values in dataset['values'].items():
                for v in values:
                    key = '-'.join([func, v, r, c])
                    print(key)
                    df2 = df[[r, c, v]].reset_index()
                    df2[v].fillna(0, inplace=True)
                    res = getattr(df2.groupby([r, c]), func)().reset_index().pivot(index=r, columns=c, values=v)
                    res.reset_index(level=0, inplace=True)
                    res.fillna(0, inplace=True)
                    d = Dataset(key=key, data=res.to_dict(orient='split'),
                                row=r, column=c, value=v, func=func)
                    db.session.add(d)
                    bucket.put_object(Key='%s.csv' % (key,), Body=res.to_csv(), ACL='public-read')
    db.session.commit()
    print('Success')
def main(params_file):
    params = {}
    params_file = os.path.realpath(params_file)
    # Figure out the paths
    script_path = os.path.realpath(__file__)
    script_dir = os.path.dirname(script_path)
    app_dir = os.path.dirname(script_dir)
    params['app_dir'] = app_dir
    # Current timestamp
    curr_timestamp = time.strftime("%d_%b_%Y_%H_%M_%S_GMT", time.gmtime())
    # Read the configuration file and check it
    utils.read_config(params_file, params)
    gen_class = GenderClassifier()
    # gen_class.loadData(params['process_country_gender_output_file_gender'],
    #                    params['process_country_gender_output_file_gender_zh'],
    #                    params['gender_classifier_model_file_zh'])
    gen_class.loadModel(params['gender_classifier_model_file_zh'])
    t1 = time.time()
    for c in ascii_lowercase:
        print gen_class.predict(c)
    print gen_class.predict('Diyi Yang')
    t2 = time.time()
    print("Predict Time: %f ms" % ((t2 - t1) * 1000))
    print gen_class.predict('Yuntian Deng')
def main():
    args = parse_args()
    set_global_seeds(666)
    config = read_config(args.config, "TRAIN")
    config_main = read_config(args.config, "MAIN")
    pprint(config)
    factory = Factory(config['train_params'])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    callbacks = create_callbacks(config['train_params']['name'], config['dumps'])
    trainer = Runner(stages=config['stages'], factory=factory, callbacks=callbacks, device=device)
    aug_train = AUGMENTATIONS_TRAIN_CROP if config['train_params']['type'] == 'crop' else AUGMENTATIONS_TRAIN
    aug_test = AUGMENTATIONS_TEST_CROP if config['train_params']['type'] == 'crop' else AUGMENTATIONS_TEST
    train_dataset = SegmentationDataset(
        data_folder=config_main['path_to_data'], transforms=aug_train, phase='train',
        activation=config_main['activation'], fold=config['fold'],
        empty_mask_params=config['data_params']['empty_mask_increase'])
    val_dataset = SegmentationDataset(
        data_folder=config_main['path_to_data'], transforms=aug_test, phase='val',
        fold=config['fold'], activation=config_main['activation'])
    train_loader = DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True,
                              num_workers=16, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=config['batch_size'], shuffle=False,
                            num_workers=16)
    dump_dir = os.path.join(config['dumps']['path'], config['dumps']['weights'],
                            config['train_params']['name'])
    os.makedirs(dump_dir, exist_ok=True)
    # Keep a copy of the config next to the weights for reproducibility.
    shutil.copy(args.config, os.path.join(dump_dir, os.path.basename(args.config)))
    trainer.fit(train_loader, val_loader)
def main():
    use_proxy = read_config('use_proxy')
    server_address = read_config('proxy_address') if use_proxy else read_config('server_address')
    generated_string = run(server_address, 100, use_proxy)  # reuse the flag instead of re-reading the config
    print(f'Length: {len(generated_string)}')
    print(compress_string(generated_string))
def get_db_session():
    global db_engine
    if db_engine is None:
        db_engine = create_engine(
            'postgresql://{username}:{password}@{hostname}/{database}'.format(
                database=utils.read_config('postgresql', 'database'),
                username=utils.read_config('postgresql', 'user'),
                password=utils.read_config('postgresql', 'password'),
                hostname=utils.read_config('postgresql', 'host')))
    return sessionmaker(bind=db_engine)()
def test_read_config(self):
    mock_app = mock.Mock()
    mock_app.config = {}
    mock_config_file = {"one": "1", "two": "2"}
    with patch('utils.open', mock_open(read_data=json.dumps(mock_config_file))) as m:
        utils.read_config(mock_app)
    self.assertTrue(m.called, "open did not take place")
    self.assertDictEqual(mock_app.config, mock_config_file,
                         "App config does not have expected values")
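# A minimal sketch (an assumption, not the project's actual implementation) of
# the utils.read_config(app) behavior the test above pins down: open a JSON
# config file and merge its keys into app.config. The "config.json" default
# path is hypothetical; the test patches open(), so any path would do.
import json

def read_config(app, path="config.json"):
    with open(path) as handle:  # patched by mock_open in the test above
        app.config.update(json.load(handle))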
def convert():
    dataset_name = read_config('dataset_name')
    train_data = get_dataset(dataset_name, train=True, shuffle=True)
    test_data = get_dataset(dataset_name, train=False, shuffle=False)
    # Recreate the parent folder for the TFRecord output.
    tfrecord_dir = read_config('tfrecord_dir')
    if os.path.isdir(tfrecord_dir):
        shutil.rmtree(tfrecord_dir)
    os.mkdir(tfrecord_dir)
    to_tfrecords(train_data)
    to_tfrecords(test_data)
def main(params_file):
    params = {}
    params_file = os.path.realpath(params_file)
    # Figure out the paths
    script_path = os.path.realpath(__file__)
    script_dir = os.path.dirname(script_path)
    app_dir = os.path.dirname(script_dir)
    params['app_dir'] = app_dir
    # Current timestamp
    curr_timestamp = time.strftime("%d_%b_%Y_%H_%M_%S_GMT", time.gmtime())
    # Read the configuration file and check it
    utils.read_config(params_file, params)
    process_country_gender(params)
def __init__(self, path):
    """
    :param path: path to theme file
    :type path: str
    :raises: :class:`~alot.settings.errors.ConfigError`
    """
    self._spec = os.path.join(DEFAULTSPATH, 'theme.spec')
    self._config = read_config(path, self._spec,
                               checks={'align': align_mode,
                                       'widthtuple': width_tuple,
                                       'force_list': force_list,
                                       'attrtriple': attr_triple})
    self._colours = [1, 16, 256]
    # Make sure every entry in a threadline's 'parts' list has its own subsection.
    threadline = self._config['search']['threadline']
    for sec in self._config['search']:
        if sec.startswith('threadline'):
            tline = self._config['search'][sec]
            if tline['parts'] is not None:
                listed = set(tline['parts'])
                here = set(tline.sections)
                indefault = set(threadline.sections)
                diff = listed.difference(here.union(indefault))
                if diff:
                    msg = 'missing threadline parts: %s' % ', '.join(diff)
                    raise ConfigError(msg)
def XST(env, target, source):
    """
    A pseudo-builder wrapper for the XST synthesizer

    Reads the config file
    Creates the XST project file (containing all the Verilog)
    Creates the XST script file (containing all of the commands)
    Executes the build

    Args:
        env (SCons Environment)
        target (list of strings)
        source (list of strings)

    Returns:
        The output file list

    Raises:
        XSTBuilderWarning
        XSTBuilderError
    """
    # OKAY MAYBE I DIDN'T NEED A PSEUDO-BUILDER
    config = utils.read_config(env)
    _xst_builder.__call__(env, env["XST_NGC_FILE"], config["verilog"])
    return [xst_utils.get_ngc_filename(config)]
def main():
    # Set up argument parsing
    parser = argparse.ArgumentParser(description='Monitor bluetooth igrill devices, and export to MQTT')
    parser.add_argument('-c', '--config', action='store', dest='config_directory', default='.',
                        help='Set config directory, default: \'.\'')
    parser.add_argument('-l', '--log-level', action='store', dest='log_level', default='INFO',
                        help='Set log level, default: \'info\'')
    parser.add_argument('-d', '--log-destination', action='store', dest='log_destination', default='',
                        help='Set log destination (file), default: \'\' (stdout)')
    options = parser.parse_args()
    config = read_config(options.config_directory)
    # Set up logging
    log_setup(options.log_level, options.log_destination)
    # Get the device list
    devices = get_devices(config['devices'])
    # Connect to MQTT
    client = mqtt_init(config['mqtt'])
    base_topic = config['mqtt']['base_topic']
    polling_interval = config['interval'] if 'interval' in config else 15
    while True:
        for device in devices:
            publish(device.read_temperature(), device.read_battery(), client, base_topic, device.name)
        time.sleep(polling_interval)
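# A hedged reconstruction of the config shape main() above reads, inferred
# only from the keys it accesses; every concrete value here is illustrative.
EXAMPLE_IGRILL_CONFIG = {
    'devices': [],                     # consumed by get_devices()
    'mqtt': {'base_topic': 'igrill'},  # consumed by mqtt_init()
    'interval': 15,                    # optional polling interval (seconds)
}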
def create_device_tree(self):
    # TODO: rename _items and _item
    if os.getuid() == 0:
        self.deviceTableWidget.clearContents()
        self.deviceTableWidget.setRowCount(0)
        devices = utils.get_linux_hdd()
        _bytes = True
        for disk in devices:
            self.deviceTableWidget.insertRow(self.deviceTableWidget.rowCount())
            _items = list(utils.get_partition_info(disk, _bytes))
            _items.insert(0, disk)
            for column, item in enumerate(_items):
                _item = QtWidgets.QTableWidgetItem()
                _item.setText(str(item))
                self.deviceTableWidget.setItem(
                    self.deviceTableWidget.rowCount() - 1, column, _item)
            activity = QtWidgets.QTableWidgetItem()
            if utils.get_partition_info(disk)[1] in utils.read_config('settings')['locked']:
                activity.setText(self.tr('locked'))
                activity.setForeground(QtGui.QBrush(QtGui.QColor('red')))
            elif disk in self.control_list.keys():
                activity.setText(self.tr('running'))
                activity.setForeground(QtGui.QBrush(QtGui.QColor('green')))
            else:
                activity.setText(self.tr('ready'))
            self.deviceTableWidget.setItem(
                self.deviceTableWidget.rowCount() - 1, 4, activity)
        # Change the width of the columns for model and serial
        self.deviceTableWidget.setColumnWidth(1, 200)
        self.deviceTableWidget.setColumnWidth(2, 200)
def get(self):
    self.logger.info('Get status')
    db = Database()
    config = read_config()
    targets = json.load(open(config.get('targets')))
    for target in targets:
        self.logger.info('Getting status for "%s"', target['name'])
        target_logs = db.get_entries(target['target_id'], 1)
        if len(target_logs) == 0:
            target['code'] = -1
            target['checked'] = ''
            continue
        newest_log = target_logs[0]
        target['code'] = newest_log['code']
        target['checked'] = timestamp_to_string(newest_log['timestamp'])
        target['response_title'] = newest_log['response_title']
        # Strip fields that should not be exposed to the client.
        if 'cookie_path' in target:
            del target['cookie_path']
        if 'url' in target:
            del target['url']
    self.logger.info('Return status for %d objects', len(targets))
    resp = flask.Response(json.dumps(targets))
    resp.headers['Access-Control-Allow-Origin'] = '*'
    resp.headers['Content-Type'] = 'application/json'
    return resp
def encode():
    config = read_config()
    base, _, _, _ = sift1m_read()
    D = base.shape[1]
    (ENCODE_ITER, ILS_ITER, ICM_ITER, RAND_ORD, N_PERT, M, K, ILS_BATCH) = (
        config.ENCODE_ITER, config.ILS_ITER, config.ICM_ITER, config.RAND_ORD,
        config.N_PERT, config.M, config.K, config.ILS_BATCH)
    if not os.path.isfile('./result/C.npy'):
        # FileNotFoundError is the right exception here (the original raised FileExistsError).
        raise FileNotFoundError("Can't find codebook in ./result/C.npy")
    with mp.Pool(mp.cpu_count()) as pool:
        lsq = LSQ(ILS_BATCH, M, K, pool, logging=False)
        C = np.load('./result/C.npy')
        B = np.random.randint(0, K, size=[base.shape[0], M]) + np.arange(0, M * K, K)[None, :]
        for i in range(ENCODE_ITER):
            print(f"Encoding # {i}")
            start = time.time()
            B = lsq.encode(base, B, C, 1, ILS_ITER, ICM_ITER, RAND_ORD, N_PERT)
            save(B, M, K)
            end = time.time()
            print('use %4ds' % (end - start))  # elapsed time (the original computed start - end)
        print(quantization_error(base, B, C))
def __init__(self):
    super().__init__()
    self.conf = read_config('model')
    # Byte level: embed raw bytes, then a bidirectional GRU with attention.
    self.byte_embedding = nn.Embedding(256, eval(self.conf['ByteEmbeddingDim']))
    self.byte_biGRU = nn.GRU(
        input_size=eval(self.conf['ByteEmbeddingDim']),
        hidden_size=eval(self.conf['ByteGRUHiddenDim']),
        bidirectional=True)
    self.byte_attention = Attention(of='Byte')
    # Packet level: a second bidirectional GRU with attention over packet embeddings.
    self.packet_biGRU = nn.GRU(
        input_size=eval(self.conf['PacketEmbeddingDim']),
        hidden_size=eval(self.conf['PacketGRUHiddenDim']),
        bidirectional=True)
    self.packet_attention = Attention(of='Packet')
    # Final classification over the concatenated forward/backward GRU states.
    self.final_classification = nn.Linear(
        in_features=eval(self.conf['PacketGRUHiddenDim']) * 2,
        out_features=eval(self.conf['NumFlowClass']))
def main(argv):
    # Argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', help="config file (default: config/development.conf)",
                        default="./config/development.conf")
    args = parser.parse_args()
    config = utils.read_config(args.config)
    years = range(config['scraping']['years_range'][0], config['scraping']['years_range'][1] + 1)
    n_proc = config['scraping']['n_proc']
    # Create the folders into which the posters will be downloaded
    for year in years:
        utils.create_folder('{}/{}/posters'.format(PATH_IMGS, year))
        utils.create_folder('{}/{}/thumbnails'.format(PATH_IMGS, year))
    # Download the posters with multiprocessing (a large speed-up compared to a single process)
    print('Retrieve url of posters')
    with Pool(n_proc) as p:
        yearly_urls = p.map(get_yearly_url_imgs, years)
    yearly_urls = list(itertools.chain.from_iterable(yearly_urls))
    # Push to the DB
    session = db_manager.get_db(config['general']['db_uri'])
    objects = [db_manager.Poster(x) for x in yearly_urls]
    session.bulk_save_objects(objects)
    session.commit()
def runSVM(train_data, test_data):
    """
    Run the SVM model on SpaCy embeddings and predict labels on test data

    Parameters
    ----------
    train_data: (list) Preprocessed Training Data
    test_data: (list) Preprocessed Test Data (IDs and Text)

    Returns
    -------
    results: (list)
    accuracy: (float)
    f1_score: (float)
    """
    SVM = read_config(filename="config.ini", section="SVM")
    # Note: bool() of a non-empty string is always True, so these flags are
    # effectively on whenever the keys are set to any non-empty value.
    results, accuracy, f1_score = svmModel(train_data, test_data,
                                           validation=bool(SVM["VALIDATION"]),
                                           validation_size=float(SVM["VALIDATION_SIZE"]),
                                           download_pretrained=bool(SVM["DOWNLOAD_PRETRAINED_SPACY"]))
    writeResults(PATHS["SVM_FILE"], results)
    return results, accuracy, f1_score
def play_sound(fname):
    global cfg_dict
    cfg_dict = utils.read_config()
    audio = pyaudio.PyAudio()
    stream_audio = audio.open(format=pyaudio.paInt16,
                              channels=2,
                              rate=cfg_dict['rate'],
                              output=True,
                              frames_per_buffer=cfg_dict['rate'])
    f = open(fname, 'rb')  # raw PCM frames; binary mode keeps the bytes intact
    data = list(utils.chunks(f.read(), cfg_dict['rate']))
    f.close()
    print ' * play'
    start = time.time()
    for i in range(len(data)):
        stream_audio.write(data[i])
    print ' * play end |',
    stream_audio.stop_stream()
    stream_audio.close()
    audio.terminate()
    print 'elapsed time =', time.time() - start
def main(r_parser):
    """
    :param r_parser: the argument parser passed in
    :return: None
    """
    r_parser.add_argument("-m", "--module", help="module", required=True)
    r_parser.add_argument("-u", "--user", help="remote machine user", required=True)
    r_parser.add_argument("-p", "--password", help="remote machine password")
    r_parser.add_argument("-d", "--destination", help="destination hosts group", required=True)
    r_parser.add_argument("-c", "--command", help="command or shell scripts", required=True)
    args = r_parser.parse_args()
    module = args.module
    user = args.user
    password = args.password
    dest = args.destination
    command = args.command
    config = utils.read_config('./rcm.conf')
    hosts_lst = utils.get_hosts(config, dest)
    _exe_command(hosts_lst, module, user, password, command)
def change_end_date(data):
    message = ""
    try:
        config = read_config(config_path)
    except FileNotFoundError:
        message = f"There was a problem. The configuration file `{config_path}` could not be found or read."
        logger.exception("", exc_info=True)
    except Exception:
        message = f"There was a problem with configuration file: `{config_path}`"
        logger.exception("", exc_info=True)
    else:
        new_end_date = data["text"].split(" ")[4]
        try:
            date.fromisoformat(new_end_date)  # validates the format; raises ValueError otherwise
            config["end_date"] = new_end_date
            new_config = toml.dumps(config)
            write_config(new_config, config_path)
            message = f"Date changed to {new_end_date}"
        # TODO: write a better error message here (date out of range vs. wrong formatting)
        except ValueError:
            logger.info(f"User inputted {new_end_date} for end date.")
            logger.exception("", exc_info=True)
            message = "Format should be yyyy-mm-dd, i.e. 2019-02-03"
    response = slack_client.chat_postMessage(channel="#general", text=message)
    try:
        assert response["ok"]
    except AssertionError:
        logger.exception("", exc_info=True)
    logger.debug(f"{response}")
def change_start_date(data):
    message = ""
    try:
        config = read_config(config_path)
    except FileNotFoundError:
        message = f"There was a problem. The configuration file `{config_path}` could not be found or read."
        logger.exception("", exc_info=True)
    except Exception:
        message = f"There was a problem with configuration file: `{config_path}`"
        logger.exception("", exc_info=True)
    else:
        new_start_date = data["text"].split(" ")[4]
        try:
            date.fromisoformat(new_start_date)  # validates the format; raises ValueError otherwise
            config["start_date"] = new_start_date
            new_config = toml.dumps(config)
            write_config(new_config, config_path)
            message = f"Date changed to {new_start_date}"
            logger.info(f"Start date changed to {new_start_date}")
        except ValueError as error:
            logger.info(f"User wrote this date as input: {new_start_date}")
            logger.info(f"User wrote this date with error: {error}")
            message = "Format should be yyyy-mm-dd, i.e. 2019-01-03"
    response = slack_client.chat_postMessage(channel="#general", text=message)
    try:
        assert response["ok"]
    except AssertionError:
        logger.exception("", exc_info=True)
    logger.debug(f"{response}")
def main():
    try:
        cfg = read_config("config.json")
        start_alarm_server(cfg)
    except KeyError:
        print("Please add config.json")
        sys.exit(1)
def main():
    global cfg, out_dir
    args = parse_args()
    cfg = read_config(args.exp_config)
    for k, v in cfg.items():
        print(k.lower(), ':', v)
    out_dir = join(cfg['OUT_DIR'], cfg['TEST_SUFFIX'])
    if not exists(out_dir):
        os.makedirs(out_dir)
    log_dir = join(out_dir, 'logs')
    if not exists(log_dir):
        os.mkdir(log_dir)
    # Set loggers
    scrn_handler = logging.StreamHandler()
    scrn_handler.setFormatter(formatter)
    logger_s.addHandler(scrn_handler)
    file_handler = logging.FileHandler(filename=join(
        log_dir,
        '{}_{}_{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}.{}'.format(
            cfg['TEST_SUFFIX'], args.cmd, *localtime()[:6], 'log')))
    file_handler.setFormatter(formatter)
    logger_f.addHandler(file_handler)
    if args.cmd == 'train':
        train_cnn(args)
    elif args.cmd == 'test':
        test_cnn(args)
def main(config_path: str, model_path: str):
    config = read_config(config_path)
    config['config_path'] = config_path
    num_gpus = config['num_gpus']
    num_protocols = config['num_protocols']
    num_tests = config['num_tests']
    params = load_parameters("data/default-parameters.json")
    tumors = [load_state("data/tumor-lib/tumor-{}.txt".format(i), params) for i in range(1, 11)]
    model_temp = TemporalFusionTransformer.load_from_checkpoint(model_path)
    model = MockPredictionModel(model_temp)
    hour_steps = config['hour_steps']
    protocol_resolution = config['protocol_resolution']
    converter = ConvertRepresentation(hour_steps=hour_steps,
                                      protocol_resolution=protocol_resolution)
    pair_protocols = get_rand_population(
        num_protocols=num_protocols,
        max_dose_value=config['max_dose_value'],
        time_interval_hours=config['time_interval_hours'])
    list_protocols = [converter.convert_pairs_to_list(protocol=protocol)
                      for protocol in pair_protocols]
    new_genetic_algorithm(population=list_protocols, model=model,
                          config=config, converter=converter)
def vary_hyperparameters(config, hyperparams, device, epochs, replications, seed, num_data_workers):
    """
    Run a config with all combinations of the hyperparameters specified.

    The script takes the same parameters as run.py plus a list of
    hyperparameters to vary over. It spans a grid of these hyperparameters and
    executes run.py with a modified config dict for each combination of
    hyperparameter values on the grid.

    :param config: path to config JSON file
    :param hyperparams: list of hyperparams as strings ('hparam.in.dot.notation=['list','of','values']')
    :param device: device to train on
    :param epochs: number of epochs to train
    :param replications: number of replications for each run
    :param seed: random seed passed to run.py
    :param num_data_workers: number of data loading processes
    """
    config = utils.read_config(config)

    # Parse the list of hyperparameters from the CLI
    grid_axis = {}
    for param in hyperparams:
        param, values = param.split('=')
        values = eval(values)  # the values arrive as a Python literal list
        grid_axis[param] = values

    # Create the hyperparameter grid
    hyperparams = sklearn.model_selection.ParameterGrid(grid_axis)

    # Execute run.py for each modified config
    for params in hyperparams:
        config = modify_config(config, params)
        run(config, device, epochs, replications, seed, num_data_workers)
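# An illustrative invocation of vary_hyperparameters() above, with made-up
# values; the dot-notation key format comes from the docstring. This spans a
# 2x2 ParameterGrid and calls run() four times.
#
#   vary_hyperparameters(
#       config='configs/base.json',
#       hyperparams=["optimizer.lr=[0.1,0.01]", "model.dropout=[0.0,0.5]"],
#       device='cuda', epochs=10, replications=3, seed=42, num_data_workers=4)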
def main(argv):
    del argv
    cfg = utils.read_config('setup.ini', 'glove_128')
    word_to_vectors = utils.load_wv_dct(cfg['file_path'])
    filter_table = utils.load_filter_set(cfg['filter_path'])
    bucket_data = utils.load_bucket(cfg['sample_path'])
    bucket_data = utils.flatten_bucket(bucket_data)
    bucket_pairs = utils.load_bucket_pairs(cfg['sorted_bucket_pairs_path'])
    train_bucket_pairs = bucket_pairs[:2000]
    # Train
    training_dataset = data_factory.generate_data(bucket_data, train_bucket_pairs,
                                                  filter_table, word_to_vectors, cfg)
    train_model(training_dataset, cfg['compress_model_path'])
    # Generate the compressed vectors and binary vectors
    select_t_bucket_pairs = bucket_pairs[2000:2050]
    grid_search_dataset = data_factory.generate_evaluate_data(
        bucket_data, select_t_bucket_pairs, filter_table, word_to_vectors)
    threshold = compressor.solve_truncate_threshold(grid_search_dataset, cfg)
    # Dimensionality reduction
    compressor.produce_short_vectors(word_to_vectors, cfg)
    # Binarization
    compressor.produce_binary_vectors(threshold, cfg)
def main():
    config = utils.read_config()
    elogger = logger.get_logger()
    # Initialize arrays for short-term and long-term traffic features
    speed_array = 'speeds'
    time_array = 'times'
    short_ttf = [[collections.defaultdict(lambda: {speed_array: [], time_array: []})
                  for _ in range(256)] for _ in range(256)]
    long_ttf = [[collections.defaultdict(lambda: {speed_array: [], time_array: []})
                 for _ in range(256)] for _ in range(256)]
    for data_file in config['data']:
        elogger.info('Generating G and T paths and extracting traffic features on {} ...'.format(data_file))
        data = utils.read_data(data_file)
        define_travel_grid_path(data, config['coords'], short_ttf, long_ttf, args.grid_size)
        elogger.info('Saving extended with G and T paths data in {}{}.\n'.format(args.data_destination_folder, data_file))
        utils.save_processed_data(data, args.data_destination_folder, data_file)
    elogger.info('Aggregate historical traffic features ...')
    utils.aggregate_historical_data(short_ttf, long_ttf)
    elogger.info('Saving extracted traffic features in {}'.format(args.ttf_destination_folder))
    utils.save_extracted_traffic_features(short_ttf, long_ttf, args.ttf_destination_folder)
def fit_from_dir(self, dir_path):
    tasks = []
    for file_name in os.listdir(dir_path):
        if file_name.endswith(".json"):
            data = read_config(os.path.join(dir_path, file_name))
            tasks.append(data)
    return self.fit(tasks)
def __init__(self, symbol, run_path, actor, config_path=None):
    # Save the variables
    self.symbol = symbol
    self.run_path = run_path
    self.info_path = self.run_path + "/info.json"
    self.config_path = config_path
    # Config dictionary
    self.config = read_config(path=config_path)
    # Info dictionary
    self.info = read_json(path=self.info_path)
    # Setup the ldb
    self.ldb = LiveDataBase(symbol=self.symbol, run_path=self.run_path,
                            config_path=self.config_path)
    # Save the actor
    self.actor = actor
    # Setup the action log
    self.actionlog = ActionLog(size=100)
    # Setup the broker
    self.broker = Broker(symbol=self.symbol, testing=True)
    # Setup the gui
    self.gui = Gui(hook=self)
def __init__(self, bot):
    self.bot = bot
    self.config = utils.read_config('todo_config')
    self.db = self.config['database']
    self.conn = sqlite3.connect(self.db)
    self.c = self.conn.cursor()
    self.create_tables()
def run(self):
    # Read the config
    config_db = read_config(self.config)
    # Extract the company graph info
    extract_company_subgraph(config_db, str(self.date), self.root_dir)
def main(argv):
    """Entry point for the etl module."""
    option_parser = optparse.OptionParser(usage=DEFAULT_USAGE_TEXT)
    option_parser.add_option("-c", "--config", dest="config", default="config.cfg",
                             help="Configuration file")
    option_parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                             default=False, help="Show verbose output")
    options, _ = option_parser.parse_args(argv)
    if not os.path.exists(options.config):
        sys.stderr.write("ERROR: {} does not exist\n".format(options.config))
        option_parser.print_help()
        return 1
    config = read_config(options.config)
    log_dir = config['general']['log_dir']
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    filename = os.path.join(log_dir, __file__.replace(".py", ".log"))
    setup_logger(filename, options.verbose)
    logging.debug("config={}".format(json.dumps(config, indent=2)))
    retcode = run_etl(config)
    return retcode
def upload(env, target, source):
    """A pseudo-builder wrapper for the upload program"""
    config = utils.read_config(env)
    _upload_builder.__call__(env, target, source)
    return "upload_img"
def modify_results(results_filename, exam_config_filename, output_filename,
                   invalidate, set_correct):
    config = utils.read_config()
    results = utils.read_results(results_filename)
    exam_data = utils.ExamConfig(exam_config_filename)
    for result in results:
        modify(result, exam_data, invalidate, set_correct)
    utils.write_results(results, output_filename, config['csv-dialect'])
def test_create_build_directory(self):
    cfg = utils.read_config(self.env)
    build_dir = "temp"
    cfg["build_dir"] = build_dir
    utils.create_build_directory(cfg)
    build_dir = os.path.join(utils.get_project_base(), build_dir)
    self.assertTrue(os.path.exists(build_dir))
    os.rmdir(build_dir)
def BITGEN(env, target, source):
    """A pseudo-builder wrapper for Xilinx bitgen"""
    config = utils.read_config(env)
    env["BITGEN_SOURCES"] = source
    env["BITGEN_TARGETS"] = target
    _bitgen_builder.__call__(env, target, source)
    return bitgen_utils.get_bitgen_filename(config)
def __init__(self, path):
    """
    :param path: path to theme file
    :type path: str
    :raises: :class:`~alot.settings.errors.ConfigError`
    """
    self._spec = os.path.join(DEFAULTSPATH, 'theme.spec')
    self._config = read_config(path, self._spec)
    self.attributes = self._parse_attributes(self._config)
def main(params_file):
    params = {}
    params_file = os.path.realpath(params_file)
    # Figure out the paths
    script_path = os.path.realpath(__file__)
    script_dir = os.path.dirname(script_path)
    app_dir = os.path.dirname(script_dir)
    params['app_dir'] = app_dir
    # Current timestamp
    curr_timestamp = time.strftime("%d_%b_%Y_%H_%M_%S_GMT", time.gmtime())
    # Read the configuration file and check it
    utils.read_config(params_file, params)
    gen_class = GenderClassifier()
    # gen_class.loadData(params['process_country_gender_output_file_gender'],
    #                    params['process_country_gender_output_file_gender_zh'],
    #                    params['gender_classifier_model_file_zh'])
    gen_class.loadModel(params['gender_classifier_model_file_zh'])
    t1 = time.time()
    for c in ascii_lowercase:
        print gen_class.predict(c)
    print gen_class.predict('Diyi Yang')
    t2 = time.time()
    print("Predict Time: %f ms" % ((t2 - t1) * 1000))
    # Spot-check predictions on a list of sample names.
    for name in ['Yuntian Deng', 'Hanyue Liang', 'Lidan Mu', 'Lanxiao Xu',
                 'Zhiruo Zhou', 'Charlotte Riley', 'Qi Guo', 'Eric Xing',
                 'Yiming Yang', 'Xiaoping Deng', 'Yidi Zhao', 'Yuanchi Ning',
                 'Jinping Xi', 'Liyuan Peng', 'Wei-chiu Ma', 'Zhiting Hu',
                 'Hao Zhang', 'Li Zhou', 'Shayan Doroudi', 'Daniel Guo',
                 'Christoph Dann']:
        print gen_class.predict(name)
def TRACE(env, target, source):
    """A pseudo-builder wrapper for Xilinx trace"""
    config = utils.read_config(env)
    env["TRACE_SOURCES"] = source
    env["TRACE_TARGETS"] = target
    _trace_builder.__call__(env, target, source)
    return trace_utils.get_trace_filename(config)
def PAR(env, target, source):
    """A pseudo-builder wrapper for Xilinx par"""
    config = utils.read_config(env)
    env["PAR_SOURCES"] = source
    env["PAR_TARGETS"] = target
    _par_builder.__call__(env, target, source)
    return par_utils.get_par_filename(config)
def generate(env): env["UPLOAD_COMMAND"] = _detect(env) config = utils.read_config(env) upload_flags = config["upload_flags"] env.SetDefault( UPLOAD_FLAGS = upload_flags, UPLOAD_COMSTR = "$UPLOAD_COMMAND $UPLOAD_FLAGS $SOURCES" ) env.AddMethod(_upload_builder, "upload") return None
def __init__(self, pid, role, config_path):
    self.__config = utils.read_config(config_path)
    # Each role maps to a [host, port] pair; cast the port to int.
    for r in self.__config:
        if r in ('acceptors', 'proposers', 'clients', 'learners'):
            self.__config[r][1] = int(self.__config[r][1])
    self.__role = role
    self.__multicast_group = tuple(self.__config[self.__role])
    self._id = pid
    self.__recv_socket = self.__init_socket(self.__multicast_group[0])
    self.__recv_socket.bind(self.__multicast_group)
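# The parsing above implies a config mapping each role to a [host, port] pair,
# with the port arriving as a string. An illustrative example with made-up
# multicast addresses:
EXAMPLE_PAXOS_CONFIG = {
    'acceptors': ['239.0.0.1', '5000'],
    'proposers': ['239.0.0.2', '5001'],
    'learners': ['239.0.0.3', '5002'],
    'clients': ['239.0.0.4', '5003'],
}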
def COREGEN(env, target, source):
    """A pseudo-builder wrapper for Xilinx coregen"""
    config = utils.read_config(env)
    source = coregen_utils.get_new_coregen_file_list(config)
    env["COREGEN_SOURCES"] = source
    env["COREGEN_TARGETS"] = target
    # Run the builder once per coregen script.
    for s in source:
        env["COREGEN_SCRIPT"] = s
        _coregen_builder.__call__(env, target, s)
    return coregen_utils.get_target_files(config)
def generate(env): env["VENDOR_RESET_COMMAND"] = _detect(env) config = utils.read_config(env) vidpid = config["target_vid_pid"] upload_tool = config["upload_tool"] env.SetDefault( VENDOR_RESET_TARGET = vidpid, VENDOR_RESET_COMSTR = "$VENDOR_RESET_COMMAND -vr $VENDOR_RESET_TARGET" ) env.AddMethod(_vendor_reset_builder, "vendor_reset") return None
def MAP(env, target, source):
    """A pseudo-builder wrapper for Xilinx map"""
    config = utils.read_config(env)
    env["MAP_SOURCES"] = source
    env["MAP_TARGETS"] = target
    _map_builder.__call__(env, target, source)
    return map_utils.get_map_filename(config)
def __init__(self, interface, session_file=None):
    self.interface = interface
    self.mode = ProgramManager.mode_no_session
    self.config = utils.read_config()
    self.imageproc_context = \
        imageproc.ExamCaptureContext(camera_id=self.config['camera-dev'])
    self.imageproc_options = None
    self.drop_next_capture = False
    self.dump_buffer = False
    self._register_listeners()
    self.from_manual_detection = False
    if session_file is not None:
        self._try_session_file(session_file)
def init_merchant(merchantName):
    configFile = './config/' + merchantName + '.cfg'
    config = read_config(configFile)
    siteUrl = config.get2("url")
    if not siteUrl:
        siteUrl = 'www.' + merchantName + '.com'
    m = Merchant(merchantName, siteUrl)
    m.crawlEntryUrl = config.get2("crawlEntryUrl")
    headersFile = config.get2("headersFile")
    if headersFile:
        m.headers = load_http_headers('./config/' + headersFile)
    m.config = config
    return m
def read_config(self, path):
    """parse alot's config file from path"""
    spec = os.path.join(DEFAULTSPATH, 'alot.rc.spec')
    newconfig = read_config(path, spec,
                            checks={'mail_container': mail_container,
                                    'force_list': force_list,
                                    'align': align_mode,
                                    'attrtriple': attr_triple,
                                    'gpg_key_hint': gpg_key})
    self._config.merge(newconfig)

    hooks_path = os.path.expanduser(self._config.get('hooksfile'))
    try:
        self.hooks = imp.load_source('hooks', hooks_path)
    except:
        logging.debug('unable to load hooks file:%s' % hooks_path)
    if 'bindings' in newconfig:
        newbindings = newconfig['bindings']
        if isinstance(newbindings, Section):
            self._bindings.merge(newbindings)

    # themes
    themestring = newconfig['theme']
    themes_dir = self._config.get('themes_dir')
    if themes_dir:
        themes_dir = os.path.expanduser(themes_dir)
    else:
        configdir = os.environ.get('XDG_CONFIG_HOME',
                                   os.path.expanduser('~/.config'))
        themes_dir = os.path.join(configdir, 'alot', 'themes')
    logging.debug(themes_dir)

    # if the config contains a theme string, use that
    if themestring:
        if not os.path.isdir(themes_dir):
            err_msg = 'cannot find theme %s: themes_dir %s is missing'
            raise ConfigError(err_msg % (themestring, themes_dir))
        else:
            theme_path = os.path.join(themes_dir, themestring)
            try:
                self._theme = Theme(theme_path)
            except ConfigError as e:
                err_msg = 'Theme file %s failed validation:\n'
                raise ConfigError((err_msg % themestring) + e.message)

    # if still no theme is set, resort to the default
    if self._theme is None:
        theme_path = os.path.join(DEFAULTSPATH, 'default.theme')
        self._theme = Theme(theme_path)

    self._accounts = self._parse_accounts(self._config)
    self._accountmap = self._account_table(self._accounts)
def _detect(env): try: return env["UPLOAD_COMMAND"] except KeyError: pass config = utils.read_config(env) upload_command = config["upload_tool"] if upload_command: return upload_command raise SCons.Errors.StopError( UPLOADBuilderError, "Could not find Arm Assember")
def _detect(env): try: return env["VENDOR_RESET_COMMAND"] except KeyError: pass config = utils.read_config(env) vendor_reset_command = config["upload_tool"] if vendor_reset_command: return vendor_reset_command raise SCons.Errors.StopError( VENDOR_RESETBuilderError, "Could not find Arm Assember")
def generate(env): env["PAR_COMMAND"] = _detect(env) config = utils.read_config(env) par_utils.create_par_dir(config) par_file = par_utils.get_par_filename(config) flag_string = par_utils.get_build_flags_string(config) env.SetDefault( PAR_OUTFILE = par_file, PAR_FLAGSTRING = flag_string, PAR_COM = "$PAR_COMMAND $PAR_FLAGSTRING $PAR_SOURCES $PAR_TARGETS" ) env.AddMethod(PAR, 'par') return None
def generate(env): env["TRACE_COMMAND"] = _detect(env) config = utils.read_config(env) trace_utils.create_trace_dir(config) trace_file = trace_utils.get_trace_filename(config) flag_string = trace_utils.get_build_flags_string(config) env.SetDefault( TRACE_OUTFILE = trace_file, TRACE_FLAGSTRING = flag_string, TRACE_COM = "$TRACE_COMMAND $TRACE_FLAGSTRING $TRACE_SOURCES", TRACE_COMSTR = "" ) env.AddMethod(TRACE, 'trace') return None
def generate(env): env["BITGEN_COMMAND"] = _detect(env) config = utils.read_config(env) bitgen_utils.create_bitgen_dir(config) bitgen_file = bitgen_utils.get_bitgen_filename(config) script_file = bitgen_utils.create_script(config) env.SetDefault( BITGEN_OUTFILE = bitgen_file, BITGEN_SCRIPT_NAME = script_file, BITGEN_COM = "$BITGEN_COMMAND -f $BITGEN_SCRIPT_NAME $BITGEN_SOURCES $BITGEN_OUTFILE", BITGEN_COMSTR = "" ) env.AddMethod(BITGEN, "bitgen") return None
def show_main():
    """
    Show the list of available operations.

    Returns:
        type_opts (dict): operation type information
    """
    os.system("clear")
    type_opts = utils.read_config(ctype="optype")
    print utils.c("==============================================================", 'y')
    print utils.c("Operation type\t| Description")
    print utils.c("--------------------------------------------------------------", 'y')
    for optype in type_opts.iterkeys():
        print "%s\t| %s" % (utils.c(optype, 'g'), utils.c(type_opts[optype]))
    print "%s\t\t| Quit this program" % utils.c("exit ", 'g')
    print utils.c("==============================================================", 'y')
    return type_opts
def load_config(self):
    # Check if we have to reload the config file
    for folder in self.window.folders():
        config_file = os.path.join(folder, MODULES_CONFIG)
        if not os.path.exists(config_file):
            continue
        if folder not in self.config or self.config[folder]['timestamp'] < os.path.getmtime(config_file):
            print("Loading config: %s" % config_file)
            self.config[folder] = {
                "timestamp": os.path.getmtime(config_file),
                "data": read_config(config_file)
            }
    return self.config
def clean_build(env):
    config = utils.read_config(env)
    base_dir = utils.get_project_base()
    build_dir = utils.get_build_directory(config, absolute=True)
    xmsgs_dir = os.path.join(base_dir, "_xmsgs")
    xlnx_auto = os.path.join(base_dir, "xlnx_auto_0_xdb")
    config_log = os.path.join(base_dir, "config.log")
    xdevice_details = os.path.join(base_dir, "xilinx_device_details.xml")
    map_report = os.path.join(base_dir, "%s_map.xrpt" % config["top_module"])
    par_usage = os.path.join(base_dir, "par_usage_statistics.html")
    par_report = os.path.join(base_dir, "%s_par.xrpt" % config["top_module"])
    # Coregen
    coregen_log = os.path.join(base_dir, "coregen.log")
    print "Removing Directories/Files:"
    for directory in (build_dir, xmsgs_dir, xlnx_auto):
        if os.path.exists(directory):
            print "\t%s" % directory
            shutil.rmtree(directory)
    for file_path in (config_log, xdevice_details, map_report,
                      par_usage, par_report, coregen_log):
        if os.path.exists(file_path):
            print "\t%s" % file_path
            os.remove(file_path)
def find_prometheus(env):
    """
    Returns the path to the img2elf tool from the configuration file

    Args:
        env (SCons Environment)

    Returns:
        (string): Path to img2elf

    Raises:
        Nothing
    """
    config = utils.read_config(env)
    i2e_path = os.path.join(config["img2elf_path"], "img2elf")
    return i2e_path