def reference_scan(self, md_text):
    """Scan Markdown document for reference citekeys.

    Adapted from <https://github.com/smathot/academicmarkdown>

    :param md_text: Markdown text to scan for ``{@family_date_KEY}`` citekeys
    :returns: ``list`` of ``dicts`` with ``key`` and ``citekey`` entries,
        one per distinct citekey found in the library
    """
    data = utils.read_json(self.zotquery.json_data)
    keys = data.keys()
    zot_items = []
    found_cks = []
    # Needs to match pattern created in QUICK_COPY
    regexp = re.compile(r'{@([^_]*?)_(\d*?)_([A-Z1-9]{3})}')
    for reg_obj in re.finditer(regexp, md_text):
        family, date, key_end = reg_obj.groups()
        citekey = '{@' + '_'.join([family, date, key_end]) + '}'
        if key_end in found_cks:
            continue
        possible_keys = [key for key in keys if key.endswith(key_end)]
        if not possible_keys:
            # FIX: previously raised IndexError when no library key matched
            # the citekey suffix; skip unknown citekeys instead.
            continue
        if len(possible_keys) > 1:
            # Disambiguate by matching the citation date; fall back to the
            # first candidate when no date matches.
            key = possible_keys[0]
            for candidate in possible_keys:
                item = data.get(candidate)
                try:
                    if item['data']['date'] == date:
                        key = candidate
                        break
                except KeyError:
                    pass
        else:
            key = possible_keys[0]
        zot_items.append({'key': key, 'citekey': citekey})
        found_cks.append(key_end)
    return zot_items
def load_data(self, path="data/testData.json", language='en'):
    """Load rasa NLU common examples and build (text, entities) pairs.

    :param path: path to a rasa NLU JSON data file
    :param language: language code stored on the instance
    """
    data = read_json(path)
    examples = data['rasa_nlu_data']['common_examples']
    self.texts = [example['text'] for example in examples]
    self.entities = [example['entities'] for example in examples]
    # Convert each example's entity dicts into (start, end, entity) spans.
    annotations = []
    for entity_list in self.entities:
        spans = [(ent['start'], ent['end'], ent['entity'])
                 for ent in entity_list]
        annotations.append({'entities': spans})
    self.train_data = list(zip(self.texts, annotations))
    self.language = language
def load_data(self, path="dataset.json"):
    """Load rasa NLU texts/intents and fit the intent label binarizer.

    :param path: path to a rasa NLU JSON data file
    """
    # Get data
    raw = read_json(path)
    examples = raw['rasa_nlu_data']['common_examples']
    self.texts = [example['text'] for example in examples]
    self.intents = [example['intent'] for example in examples]
    # Set label binarizer
    binarizer = LabelBinarizer()
    binarizer.fit(self.intents)
    self.label_binarizer = binarizer
    self.classes = binarizer.classes_
def open_attachment(self):
    """Open item's attachment in default app"""
    # `self.arg` is either a direct file path or an item key.
    if os.path.isfile(self.arg):
        subprocess.check_output(['open', self.arg])
        return
    # if self.input is item key
    data = utils.read_json(self.zotquery.json_data)
    item_id = self.arg.split('_')[1]
    item = data.get(item_id, None)
    if item is None:
        return
    for attachment in item['attachments']:
        att_path = attachment['path']
        if os.path.exists(att_path):
            subprocess.check_output(['open', att_path])
def search_new(self):
    """Show only the newest added items.
    """
    old_data = utils.read_json(self.wf.datafile('backup.json'))
    current_data = utils.read_json(self.zotquery.json_data)
    # Keys present now but absent from the backup are newly added items.
    new_keys = set(current_data.keys()) - set(old_data.keys())
    if not new_keys:
        self.wf.add_item('No new items!',
                         'No newly added items in your Zotero library.',
                         icon='icons/n_error.png')
        return
    for key in new_keys:
        # Get JSON info for that item
        item = current_data.get(key, None)
        if item:
            # Prepare dictionary for Alfred
            self.wf.add_item(**self._prepare_item_feedback(item))
def search_items(self):
    """Search individual items.
    """
    # Get JSON data of user's Zotero library
    library = utils.read_json(self.zotquery.json_data)
    # `_get_items` yields (rank, key) tuples; only the key matters here.
    for ranked in self._get_items():
        item = library.get(ranked[-1], None)
        if item:
            # Prepare dictionary for Alfred
            self.wf.add_item(**self._prepare_item_feedback(item))
def search_for_items(scope, query):
    """Return Alfred feedback dicts for library items matching `query` in `scope`."""
    # Generate appropriate sqlite query
    sqlite_query = make_item_sqlite_query(scope, query)
    config.log.info('Item sqlite query : {}'.format(sqlite_query))
    # Run sqlite query and get back item keys
    item_keys = run_item_sqlite_query(sqlite_query)
    # Get JSON data of user's Zotero library
    library = utils.read_json(zq.backend.json_data)
    results = []
    for key in item_keys:
        item = library.get(key, None)
        if item is None:
            continue
        # Prepare dictionary for Alfred
        results.append(ResultsFormatter(item).prepare_item_feedback())
    return results
def search_in_groups(self):
    """Search for items within selected group.
    """
    # Get name of group stored in cache
    group = self._get_group_name()
    # Get JSON data of user's Zotero library
    library = utils.read_json(self.zotquery.json_data)
    # Matches come back as (rank, key) tuples; ignore the rank score.
    for ranked in self._get_in_group(group):
        item = library.get(ranked[-1], None)
        if item:
            # Prepare dictionary for Alfred
            self.wf.add_item(**self._prepare_item_feedback(item))
def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):
    """
    Setup logging configuration.

    :param save_dir: directory (``Path``-like) where handler log files go
    :param log_config: path to a ``dictConfig``-style JSON config file
    :param default_level: fallback level used when the config file is missing
    """
    log_config = Path(log_config)
    if not log_config.is_file():
        print("Warning: logging configuration file is not found in {}.".format(
            log_config))
        logging.basicConfig(level=default_level)
        return
    config = read_json(log_config)
    # modify logging paths based on run config
    # (iterate values() directly: the handler names are not used)
    for handler in config['handlers'].values():
        if 'filename' in handler:
            handler['filename'] = str(save_dir / handler['filename'])
    logging.config.dictConfig(config)
def __init__(self, args, options='', timestamp=True):
    """Parse CLI args, load the JSON config, and prepare run directories/logging.

    :param args: an ``argparse.ArgumentParser`` pre-populated with defaults
    :param options: custom CLI option specs (objects with ``flags``/``type``)
    :param timestamp: when truthy, suffix save/log dirs with a run timestamp
    """
    # parse default and custom cli options
    for opt in options:
        args.add_argument(*opt.flags, default=None, type=opt.type)
    args = args.parse_args()

    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device

    if args.resume:
        # Resuming: the config file lives next to the checkpoint.
        self.resume = Path(args.resume)
        self.cfg_fname = self.resume.parent / 'config.json'
    else:
        msg_no_cfg = "Configuration file need to be specified. Add '-c config.json', for example."
        assert args.config is not None, msg_no_cfg
        self.resume = None
        self.cfg_fname = Path(args.config)

    # load config file and apply custom cli options
    self._config = _update_config(read_json(self.cfg_fname), options, args)

    # set save_dir where trained model and log will be saved.
    save_dir = Path(self.config['trainer']['save_dir'])
    run_id = datetime.now().strftime(r'%m%d_%H%M%S') if timestamp else ''
    exper_name = self.config['name']
    self._save_dir = save_dir / 'models' / exper_name / run_id
    self._log_dir = save_dir / 'log' / exper_name / run_id
    self.save_dir.mkdir(parents=True, exist_ok=True)
    self.log_dir.mkdir(parents=True, exist_ok=True)

    # save updated config file to the checkpoint dir
    write_json(self.config, self.save_dir / 'config.json')

    # configure logging module
    setup_logging(self.log_dir)
    self.log_levels = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG
    }
def search_within_group(scope, query):
    """Return Alfred feedback dicts for items matching `query` inside the saved group."""
    group_type = scope.split('-')[-1]
    # Read saved group info
    path = config.WF.cachefile('{}_query_result.txt'.format(group_type))
    group_name = get_group_name(utils.read_path(path))
    sqlite_query = make_in_group_sqlite_query(scope, query, group_name)
    config.log.info('Item sqlite query : {}'.format(sqlite_query))
    # Run sqlite query and get back item keys
    item_keys = run_item_sqlite_query(sqlite_query)
    # Get JSON data of user's Zotero library
    library = utils.read_json(zq.backend.json_data)
    results = []
    for key in item_keys:
        item = library.get(key, None)
        if item is None:
            continue
        # Prepare dictionary for Alfred
        results.append(ResultsFormatter(item).prepare_item_feedback())
    return results
def generate_data(self):
    """Create a generator with dictionaries for each item in ``json_data``.

    :returns: ``list`` of ``dicts`` with all item's data as ``strings``
    :rtype: :class:`generator`
    """
    json_data = utils.read_json(self.json_data)
    # for each `item`, get its data in dict format
    # FIX: `dict.itervalues()` is Python 2-only; `.values()` iterates the
    # same objects and works on both Python 2 and 3.
    for item in json_data.values():
        array = list()
        # get search columns from scope
        columns = config.FILTERS.get('general', None)
        if columns:
            for column in columns:
                # get search map from column
                json_map = config.FILTERS_MAP.get(column, None)
                if json_map:
                    # get data from `item` using search map
                    array.append({column: self.get_datum(item, json_map)})
        yield array
import argparse
import json

from lib import utils

# Pretty-print a JSON file given on the command line.
parser = argparse.ArgumentParser(description='\nThis program Read JSON file... ')
parser.add_argument('file name', help='Add JSON file name')
args = parser.parse_args()

# The positional is named "file name" (with a space), so attribute-style
# access is impossible; go through getattr instead.
json_data = utils.read_json(getattr(args, 'file name'))
print(json.dumps(json_data, indent=4, sort_keys=True))
def process(file_path, new_screen_name):
    """Transform an XCTrack screen config and write it to ``screens/<name>.xcfg``.

    Flags read from ``sys.argv`` control the transformation:
    ``-por2lan`` / ``-lan2por`` copy one orientation's layout onto the other,
    ``-nocomp`` / ``-onlycomp`` filter pages by the competition navigation
    widget, and exactly one of ``-por`` / ``-lan`` (required) selects the
    single orientation to keep.

    :param file_path: path to the source ``.xcfg`` JSON file
    :param new_screen_name: basename for the output file under ``screens/``
    """
    if not os.path.isfile(file_path):
        sys.exit('File not found')
    data = read_json(file_path)

    new_data = dict()
    new_data['layout'] = data['layout']
    new_data['info'] = {
        'versionCode': data['info']['versionCode'],
        'versionName': data['info']['versionName'],
    }
    new_data['preferences'] = {
        'Display.Theme': 'WhiteHCTheme',
        'EarthModel': 'WGS84',
    }

    if '-por2lan' in sys.argv:
        new_data['layout']['landscape'] = new_data['layout']['portrait']
    if '-lan2por' in sys.argv:
        # FIX: this branch previously duplicated -por2lan (copied portrait
        # onto landscape); -lan2por must copy landscape onto portrait.
        new_data['layout']['portrait'] = new_data['layout']['landscape']

    # nocomp and onlycomp split
    comp_widget = 'org.xcontest.XCTrack.navig.TaskCompetition'
    for orient in ['portrait', 'landscape']:
        if orient not in new_data['layout']:
            continue
        if '-nocomp' in sys.argv:
            new_data['layout'][orient] = [
                page for page in new_data['layout'][orient]
                if comp_widget not in page['navigations']
            ]
        if '-onlycomp' in sys.argv:
            new_data['layout'][orient] = [
                page for page in new_data['layout'][orient]
                if comp_widget in page['navigations']
            ]

    # portrait and landscape modes
    if '-por' in sys.argv:
        if 'landscape' in new_data['layout']:
            del new_data['layout']['landscape']
        new_data['preferences']['Display.Orientation'] = 'PORTRAIT'
    elif '-lan' in sys.argv:
        if 'portrait' in new_data['layout']:
            del new_data['layout']['portrait']
        new_data['preferences']['Display.Orientation'] = 'LANDSCAPE'
    else:
        sys.exit('Please specify either -por (portrait) or -lan (landscape)')

    sort_widgets_by_name(new_data['layout'])

    ensure_dir('screens')
    target_file = os.path.join('screens', new_screen_name + '.xcfg')
    write_json(target_file, new_data)