def get_configuration_file_data(name, folder=None, candidate=False):
    """Return the merged content of a configuration file.

    You can specify a ``folder`` name to look for this configuration file
    in a configuration sub-folder instead of the root folder. If ``name``
    does not correspond to any setting, return `None`. If ``folder`` does
    not correspond to any setting, also return `None`. If the ``candidate``
    argument is set to `True`, no check is done to ensure that the folder
    exists.

    :param str name: Name of the configuration file.
    :param str folder: Optional name of a configuration sub-folder.
    :rtype: dict or None
    """
    configs = get_configuration_files(name, folder, candidate)
    if not configs and not candidate:
        return None
    data = {}
    for path in configs:
        if not os.path.isfile(path):
            continue
        with open(path, 'r') as fh:
            data.update(cson.load(fh))
    return data
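# A minimal usage sketch for get_configuration_file_data() above. The
# setting names 'server' and 'logging' are hypothetical; the key point is
# that later files in the candidate list override earlier ones, because
# dict.update() runs in path order.
settings = get_configuration_file_data('server', folder='logging')
if settings is None:
    settings = {}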
def read_file(self):
    with open(self.file_name, 'r') as weekfile:
        self.file_data = cson.load(weekfile)
        # cson.load() consumes the file, so rewind before reading the raw
        # text into write_data.
        weekfile.seek(0)
        self.write_data = weekfile.read()
    # plain str key (the original used Python 2's unicode("content", "utf-8"))
    self.content_data = self.file_data["content"]
    return self.file_data
def main():
    # boostnote_path = "./"
    boostnote_path = "C:\\Users\\kyet\\Downloads\\boostnote-mobile\\"
    # boostnote_path = "C:\\Users\\kyet\\Downloads\\boostnote\\"
    note_path = boostnote_path + "notes"
    attachment_path = boostnote_path + "attachments"
    images_path = boostnote_path + "images"
    typora_path = os.path.join(boostnote_path, "typora")

    f_dict = parse_folders(boostnote_path)
    file_list = os.listdir(note_path)
    for filename in file_list:
        with open(os.path.join(note_path, filename), "r", encoding="UTF-8") as f:
            note = cson.load(f)
        # isTrashed is a boolean in Boostnote CSON, not the string "true"
        if note["isTrashed"]:
            continue  # skip trashed notes
        if note["type"] == "MARKDOWN_NOTE":
            parse_note(f_dict, note, note_path, attachment_path,
                       images_path, typora_path)
        else:  # snippet note
            parse_snippet(f_dict, note, note_path, typora_path)
def get_args(cls, log_dir):
    args_path = os.path.join(log_dir, 'args.cson')
    if not os.path.exists(args_path):
        raise Exception('No args.cson file found in {}'.format(log_dir))
    with open(args_path, 'r') as f:
        args = cson.load(f)
    return args
def check_cson(filehandle):
    # Returns True only if the file parses as CSON and the result is truthy.
    try:
        data = cson.load(filehandle)
    except ValueError:
        return False
    return bool(data)
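# A hypothetical caller for check_cson(). The check consumes the file
# handle, so it must be rewound with seek(0) before the real parse;
# 'settings.cson' is an illustrative path.
with open('settings.cson', 'rb') as fh:
    if not check_cson(fh):
        raise SystemExit('settings.cson is not valid CSON')
    fh.seek(0)
    settings = cson.load(fh)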
def load_cson(file_path, prefix=os.curdir):
    if prefix:
        file_path = os.path.join(prefix, file_path)
    if not os.path.exists(file_path):
        # the original passed the path as a stray argument instead of
        # formatting it into the message
        raise Exception('cson file [%s] does not exist.' % file_path)
    with open(file_path, 'r', encoding='utf-8') as f:
        data = cson.load(f)
    return data
def test_parser():
    srcdir = os.path.join(os.path.split(__file__)[0], 'parser')
    for name in os.listdir(srcdir):
        if not name.endswith('.cson'):
            continue
        cson_fname = os.path.join(srcdir, name)
        with open(cson_fname, 'rb') as fin:
            c = cson.load(fin)

        json_name = name[:-5] + '.json'
        with open(os.path.join(srcdir, json_name), 'rb') as fin:
            j = json.loads(fin.read().decode('utf-8'))
        assert c == j

        # CSON is a superset of JSON, so the .json file itself must also
        # round-trip through cson.load()
        with open(os.path.join(srcdir, json_name), 'rb') as fin:
            c = cson.load(fin)
        assert c == j
def pair_recipes(masterdir):
    recipedir = "{}/recipe-converter/recipes/".format(masterdir)
    atomdir = "{}/object-fetcher/atoms/".format(masterdir)
    # walk all files and folders under the recipe folder
    for root, dirs, files in os.walk(recipedir):
        for filename in files:
            # cson.load() takes a file object, not a bare filename, and the
            # substring search must run on the file's text, so read it in
            filepath = os.path.join(root, filename)
            with open(filepath, 'r') as fh:
                contents = fh.read()
            # `oname` is assumed to come from the enclosing scope
            if contents.find('template_name: "{}"\n'.format(oname)) != -1:
                print(" found {}".format(oname))
def send_app_config(self, app_name, file_upload,
                    format_file="front_config", stage="staging"):
    config_name = ""
    format_param = "&format=cson"
    files = None
    data = None
    if format_file == "front_config":
        files = {
            "file": io.StringIO(json.dumps({"json": cson.load(file_upload)}))
        }
    elif format_file == "etl_config":
        config_name = "/etl"
        data = file_upload
    elif format_file == "report":
        config_name = "/report"
        data = file_upload
    elif format_file == "dashboard-Group":
        config_name = "/dashboard-Group"
        data = file_upload
    elif format_file == "augment.py":
        config_name = "/augment"
        files = {"file": file_upload}
        format_param = ""
    elif format_file == "preprocess_validation":
        config_name = "/preprocess_validation"
        data = file_upload
    elif format_file == "permissions.py":
        config_name = "/permissions"
        files = {"file": file_upload}
        format_param = ""
    elif format_file == "permissions_config":
        config_name = "/permissions_config"
        data = file_upload
    elif format_file == "notifications_handlers.py":
        config_name = "/notifications_handlers"
        files = {"file": file_upload}
        format_param = ""
    req = requests.put(
        f"{self.url_api}/{app_name}/{self.__url_config}{config_name}"
        f"?stage={stage}{format_param}",
        headers=self.get_headers(),
        files=files,
        data=data,
    )
    req.raise_for_status()
    return req.json()
def __init__(self, cson_file_path):
    # use a context manager so the file handle is not leaked
    with open(cson_file_path) as f:
        data = cson.load(f)
    self._obj_type = data['type']
    self._title = data['title']
    self._folder_id = data['folder']
    self._is_trashed = data['isTrashed']
    if self.obj_type == self.TYPE_TEXT_MARKDOWN:
        self._content = data['content']
    else:
        self._content = self.build_snippet_content(data)
def get_file_dir_and_name(file):
    with open(file, "r") as fh:
        notes = cson.load(fh)
    if notes["type"] == "MARKDOWN_NOTE":
        directory = {
            "folder": notes["folder"],
            "name": notes["title"],
            "content": notes["content"]
        }
    else:
        directory = None
    return directory
def create_history(location=os.path.join(home, 'Boostnote')):
    files = {}
    for note in get_notes():
        with open(note, 'r') as fh:
            title = cson.load(fh)['title']
        files[note.split('/')[-1]] = {
            'title': title,
            'updated': False
        }
    with open(os.path.join(location, 'history.json'), 'w+') as out:
        json.dump(files, out)
def main() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument('filename')
    parser.add_argument('output')
    args = parser.parse_args()

    with open(args.filename) as src:
        contents = cson.load(src)
    with open(args.output, 'wb') as dest:
        plistlib.dump(contents, dest)
    return 0
def load_atom_config():
    # Load the Atom config; returns the atom_config dict.
    try:
        with open(atom_config_uri, 'rb') as f:
            atom_config = cson.load(f)
        # Back up the config file just in case
        config_backup = os.path.join(theme_historydir, 'config.cson.bak')
        with open(config_backup, 'w') as f:
            f.write(cson.dumps(atom_config, indent=4))
        return atom_config
    except Exception as e:
        print("Unable to read atom config file, exiting.")
        print('{}'.format(e))
        sys.exit()
def load(*args, **kwargs):
    """
    Wrapper for #!cson.load(). Accepts an additional *filename* parameter
    that will be included in the error message.
    """
    filename = kwargs.pop('filename', None)
    try:
        return _cson.load(*args, **kwargs)
    except _cson.ParseError as exc:
        msg = 'parse error: {}'.format(exc)
        if filename:
            msg = '{}: {}'.format(filename, msg)
        raise Error(msg, exc)
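# A brief sketch of the wrapper above in use; 'config.cson' is a made-up
# path. The filename= keyword is popped before delegating to _cson.load(),
# so parse errors read "config.cson: parse error: ...".
with open('config.cson') as fp:
    settings = load(fp, filename='config.cson')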
def buildNotesJson():
    notes = []
    rootdir = '../Boostnote/notes'
    for folder, dirs, files in os.walk(rootdir):
        for file in files:
            if file.endswith('.cson'):
                fullpath = os.path.join(folder, file)
                with open(fullpath, 'r') as f:
                    note = cson.load(f)
                    notes.append(note)
    print(json.dumps(notes))
    with open('./notes.json', 'w+') as outfile:
        json.dump(notes, outfile)
def extract_md_from_BoostNote():
    """Extract Markdown notes from a BoostNote working directory."""
    cnt_success = 0
    cnt_skip = 0

    root = tkinter.Tk()
    root.withdraw()
    msg = 'Select your BoostNote working directory'
    boostnote = tkinter.filedialog.askdirectory(title=msg)

    conf_json = os.path.join(boostnote, 'boostnote.json')
    with open(conf_json) as f:
        conf = json.load(f)

    notes = os.path.join(boostnote, 'notes')
    for file in os.listdir(notes):
        with open(os.path.join(notes, file)) as f:
            note = cson.load(f)
        if note['type'] != 'MARKDOWN_NOTE':
            cnt_skip += 1
            continue
        key = note['folder']
        folder = convert_to_name(key, conf)
        title = note['title']
        content = note['content']
        if note['isTrashed']:
            folder = 'Trash'
        folder = sanitize(folder)
        title = sanitize(title)
        output_dir = os.path.join(boostnote, 'markdown', folder)
        os.makedirs(output_dir, exist_ok=True)
        output_file = os.path.join(output_dir, title + '.md')
        with open(output_file, 'w') as f:
            f.write(content)
        cnt_success += 1

    print('=============================================')
    print('Converting BoostNote to Markdown succeeded!')
    print(f'success: \t{cnt_success}')
    print(f'skip: \t{cnt_skip}')
def load(config_name, mode=None):
    # load a <config_name>.cson file that sits next to this module
    file_name = config_name + '.cson'
    file_dir = os.path.dirname(os.path.realpath(__file__))
    file_path = os.path.join(file_dir, file_name)
    if not os.path.exists(file_path):
        raise ValueError('no such config file: %s' % file_path)
    with open(file_path) as f:
        config = cson.load(f)
    define_link(config)
    return config
def parse_file(filename, root_note_path):
    # Parse the .cson, returning a dictionary that can be accessed more
    # easily. root_note_path and filename must be joined into a single path
    # so files can be accessed even when main.py is not run from the same
    # directory.
    with open(os.path.join(root_note_path, filename), "r", errors="ignore") as f:
        parsed_cson = cson.load(f)

    # The note title is used as the filename, so sanitise it to remove any
    # problem characters (e.g. \).
    def sanitise_title(title):
        forbidden_chars = ["/", "\\", "<", ">", ":", "\"", "|", "?", "*"]
        return "".join(x if x not in forbidden_chars else "_" for x in title)

    def set_mtime(file_path, modifiedAt):
        os.utime(file_path, (modifiedAt, modifiedAt))

    # Set all the required 'attributes' of the file.
    title = sanitise_title(parsed_cson["title"])
    # Empty notes raise a KeyError, since there is no content. Set content
    # to an empty string in this case, so they are still exported correctly.
    try:
        content = parsed_cson["content"]
    except KeyError:
        content = ""
    modifiedAt = parse_time.parse_time(parsed_cson["updatedAt"])
    folder = f_dict[parsed_cson["folder"]]

    # Create the respective folder for the note to be placed in.
    # os.makedirs creates the entire folder structure from left to right,
    # not just the rightmost directory.
    output_dir = os.path.join(root_note_path, "Output", folder)
    os.makedirs(output_dir, exist_ok=True)

    file_path = os.path.join(output_dir, title + ".md")
    # Open a new Markdown file in its respective folder and write the
    # contents of the .cson file to it.
    with open(file_path, "w") as output:
        output.write(content)
    set_mtime(file_path, modifiedAt)
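# A hypothetical driver for parse_file(). It assumes the module-level
# f_dict (mapping Boostnote folder keys to display names) has already been
# filled; the key and path below are placeholders.
f_dict = {'folderkey123': 'Notes'}
root_note_path = './Boostnote/notes'
for fname in os.listdir(root_note_path):
    if fname.endswith('.cson'):
        parse_file(fname, root_note_path)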
def create_history(location=os.path.join(home, 'Boostnote')):
    files = {}
    for note in get_notes():
        with open(note, 'r') as fh:
            title = cson.load(fh)['title']
        files[note.split('/')[-1]] = {
            'title': title,
            'updated': False
        }
    try:
        with open(os.path.join(location, 'history.json'), 'w+') as out:
            json.dump(files, out)
    except Exception as e:
        print(f"Exiting, got the following error: {e}.\n"
              "Report the error on GitHub")
def main():
    snippets_dir_path = join(get_cwd(), 'atom-ember-snippets', 'snippets')
    for filename in listdir(snippets_dir_path):
        snippet_path = join(snippets_dir_path, filename)
        if not isfile(snippet_path):
            # Ignore directories
            continue
        if not filename.startswith('import-'):
            # Process only "import-*.cson" files
            continue
        with open(snippet_path, 'rb') as cson_file:
            conf = cson.load(cson_file)
        # The file has a single top-level scope key; iterate its snippets.
        # (The original used Python 2's conf.values()[0].iteritems().)
        snippets = [
            convert_to_xml(description, snippet)
            for description, snippet in next(iter(conf.values())).items()
        ]
        save(filename, snippets)
def __init__(self):
    # Determine Shinjitai/Kyujitai database path
    current_path = os.path.abspath(os.path.dirname(__file__))
    kyujitai_db_path = os.path.join(current_path, 'kyujitai.cson')

    # Parse Kyujitai database
    with open(kyujitai_db_path, 'r', encoding="utf-8") as kyujitai_db_file:
        self.kyujitai_data = cson.load(kyujitai_db_file)

    # Build Shinjitai/Kyujitai conversion dictionaries
    self.shinjitai_to_kyujitai_database = {}
    self.kyujitai_to_shinjitai_database = {}
    for entry in self.kyujitai_data:
        shinjitai = entry[0]
        kyujitai = entry[1]
        self.shinjitai_to_kyujitai_database[shinjitai] = kyujitai
        self.kyujitai_to_shinjitai_database[kyujitai] = shinjitai
def installSnippets(self):
    """Adds Jumpscale snippets to your atom snippets file."""
    # Note: to add more snippets they need to be under the same 'key',
    # so we do a snippets merge based on keys.
    print("install snippets")
    merged = {}
    snippets_existing_path = os.path.expanduser("~/.atom/snippets.cson")
    snippetspath = os.path.join(
        os.path.dirname(inspect.getfile(self.__class__)), "snippets.cson")
    if j.sal.fs.exists(snippets_existing_path, followlinks=True):
        snippets_existing = j.sal.fs.fileGetContents(snippets_existing_path)
        snippets_existing2 = ""
        for line in snippets_existing.split("\n"):
            # the original compared the bound method line.strip to "",
            # which is always False; call it instead
            if line.startswith("#") or line.strip() == "":
                continue
            # keep the newlines, otherwise the CSON cannot be re-parsed
            snippets_existing2 += line + "\n"
        if snippets_existing2.strip() != "":
            # parse and merge only when the existing file has content
            # (the original condition == "" made this branch unreachable)
            merged = cson.loads(snippets_existing2)
            with open(snippetspath) as jssnippets:
                snippets = cson.load(jssnippets)
            for k, v in snippets.items():
                if k in merged:
                    merged[k].update(snippets[k])
                else:
                    # scopes not present yet are added wholesale; the
                    # original silently dropped them
                    merged[k] = v
            content = cson.dumps(merged, indent=4, sort_keys=True)
        else:
            content = j.sal.fs.fileGetContents(snippetspath)
        j.sal.fs.writeFile(os.path.expanduser("~/.atom/snippets.cson"),
                           content)
    else:
        nc = j.sal.fs.fileGetContents(snippetspath)
        j.sal.fs.writeFile(filename=snippets_existing_path, contents=nc,
                           append=False)
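# A standalone sketch of the key-level merge installSnippets() describes:
# snippets live under a shared scope key, so merging must happen per key
# rather than by replacing the whole file. All values here are made up.
existing = {'.source.python': {'print': {'prefix': 'pr', 'body': 'print($1)'}}}
incoming = {'.source.python': {'log': {'prefix': 'lg', 'body': 'log($1)'}}}
for scope, snippets in incoming.items():
    existing.setdefault(scope, {}).update(snippets)
# existing now holds both snippets under the '.source.python' scope key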
def load_notes(connector):
    # NOTE: the connector parameter is unused; the original passes the
    # module-level plugincon.connector instead
    for note in glob.glob("notes/*.cson"):
        with open(note) as fh:
            notes.append(Note.from_dict(cson.load(fh), plugincon.connector))
def generate_job(prefix, prefer='gpu', rest_args=None, dataset=None,
                 resume_dir=None, base_log_path=None):
    rest_args = rest_args or []  # avoid a mutable default argument
    with open(tpl_file, 'r') as f:
        tpl = Template(f.read())

    base_path = '/vol/bitbucket/rv1017'
    if base_log_path is None:
        base_log_path = base_path + '/log'

    # TODO reuse parameters if log directory exists, like `run_like.py`
    if resume_dir:
        with open(resume_dir + '/args.cson', 'r') as f:
            pargs = cson.load(f)
    else:
        pargs = sane_args.copy()
        if prefix and prefix != '':
            pargs['prefix'] = prefix

    pargs['log_dir'] = resume_dir or _gen_log_dir(base_log_path, prefix)
    pargs['gen_log_dir'] = False
    pargs['show_progress_bar'] = False
    prefix = pargs.get('prefix') or prefix
    logging.warning('Submitting with prefix {}'.format(prefix))
    if pargs.get('debug'):
        pargs.pop('debug')

    data_in_rest_args = any(map(lambda s: 'data_file' in s, rest_args))
    if (not pargs.get('data_file') or dataset) and not data_in_rest_args:
        if not dataset:
            raise Exception('No dataset specified and no data_files found in args')
        logging.warning('Using dataset {}'.format(dataset))
        pargs['data_file'] = base_path + '/data/{}/train.tfrecords'.format(dataset)
        pargs['val_data'] = base_path + '/data/{}/val.tfrecords'.format(dataset)
    elif data_in_rest_args:
        logging.warning('Relying on data files in rest args')
    else:
        logging.warning('Using data files from resume directory')

    conda_envs = dict(gpu='tf-gpu', cpu='tf-cpu')
    CUDA_ROOT = '/vol/cuda/8.0.61'
    conda_root = '/vol/hmi/projects/ruben/miniconda'
    conda_env = conda_envs[prefer]
    env_base = '{}/envs/{}'.format(conda_root, conda_env)
    env = dict(
        LD_LIBRARY_PATH='{}/lib:$ENV(CUDA_ROOT)/lib:$ENV(LD_LIBRARY_PATH)'.format(env_base),
        PYTHONHOME=env_base
    )
    if prefer == 'cpu':
        env['CUDA_VISIBLE_DEVICES'] = "-1"

    tpl_args = dict(
        python='{}/bin/python'.format(env_base),
        base='/vol/bitbucket/rv1017',
        env=env,
        prefix=prefix,
        prefer=prefer,
        env_string=' '.join('{}={}'.format(k, v) for k, v in env.items()),
        args=dict_to_args(pargs) + ' ' + ' '.join(rest_args)
    )
    job_desc = tpl.render(**tpl_args)
    return job_desc
def parse(opt_path, is_train=True):
    extension = osp.splitext(opt_path)[1].lower()
    if extension == '.json':
        import json
        # remove comments starting with '//'
        json_str = ''
        with open(opt_path, 'r') as f:
            for line in f:
                line = line.split('//')[0] + '\n'
                json_str += line
        opt = json.loads(json_str, object_pairs_hook=OrderedDict)
    elif extension == '.cson':
        import cson
        with open(opt_path, 'r') as f:
            opt = cson.load(f)
    elif extension in ('.yml', '.yaml'):
        import yaml
        with open(opt_path, 'r') as f:
            opt = yaml.safe_load(f)
    else:
        raise ValueError('Unknown file extension: {}'.format(extension))

    opt['is_train'] = is_train
    scale = opt['scale']

    # datasets
    for phase, dataset in opt['datasets'].items():
        phase = phase.split('_')[0]
        dataset['phase'] = phase
        dataset['scale'] = scale
        is_lmdb = False
        if dataset.get('dataroot_HR') is not None:
            dataset['dataroot_HR'] = os.path.expanduser(dataset['dataroot_HR'])
            if dataset['dataroot_HR'].endswith('lmdb'):
                is_lmdb = True
        if dataset.get('dataroot_HR_bg') is not None:
            dataset['dataroot_HR_bg'] = os.path.expanduser(dataset['dataroot_HR_bg'])
        if dataset.get('dataroot_LR') is not None:
            dataset['dataroot_LR'] = os.path.expanduser(dataset['dataroot_LR'])
            if dataset['dataroot_LR'].endswith('lmdb'):
                is_lmdb = True
        dataset['data_type'] = 'lmdb' if is_lmdb else 'img'
        if phase == 'train' and dataset.get('subset_file') is not None:
            dataset['subset_file'] = os.path.expanduser(dataset['subset_file'])

    # paths
    for key, path in opt['path'].items():
        if path:
            opt['path'][key] = os.path.expanduser(path)
    if is_train:
        experiments_root = os.path.join(opt['path']['root'], 'experiments', opt['name'])
        opt['path']['experiments_root'] = experiments_root
        opt['path']['models'] = os.path.join(experiments_root, 'models')
        opt['path']['training_state'] = os.path.join(experiments_root, 'training_state')
        opt['path']['log'] = experiments_root
        opt['path']['val_images'] = os.path.join(experiments_root, 'val_images')
        # change some options for debug mode
        if 'debug' in opt['name']:
            opt['train']['val_freq'] = 8
            opt['logger']['print_freq'] = 2
            opt['logger']['save_checkpoint_freq'] = 8
            opt['train']['lr_decay_iter'] = 10
    else:  # test
        results_root = os.path.join(opt['path']['root'], 'results', opt['name'])
        opt['path']['results_root'] = results_root
        opt['path']['log'] = results_root

    # network
    opt['network_G']['scale'] = scale

    # export CUDA_VISIBLE_DEVICES
    gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
    print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
    return opt
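# A hypothetical call to parse() above for a CSON options file; the path
# is illustrative. After parsing, derived paths appear under opt['path'].
opt = parse('options/train.cson', is_train=True)
print(opt['path']['experiments_root'])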
def read_cson():
    with open('a.cson', 'rb') as fin:
        obj = cson.load(fin)
    print(obj)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    assert self.app
    fn = self.fn
    assert fn.lower().endswith(".cson")

    self.full_fn = self.app.resource_path(fn)
    if self.full_fn is None:
        raise FileNotFoundError("Could not find sprite CSON for " + self.fn)
    with open(self.full_fn, "rb") as f:
        self.data = data = cson.load(f)
    assert data

    self.skins = data["skins"]
    size = data.get("size", None)
    self.size = ivec2(size) if size else None
    # the original looked up "size" again here; "tile_size" is likely the
    # key that was meant, given the field it fills
    tile_size = data.get("tile_size", None)
    self.tile_size = ivec2(tile_size) if tile_size else None
    origin = data.get("origin", None)
    self.origin = vec2(origin) if origin else None
    mask = data.get("mask", None)
    if mask:
        self.mask = vec2(mask)

    self.animation_meta = data["animation"]
    self.frames = self.animation_meta["frames"]
    self.speed = self.animation_meta.get("speed", 1.0)

    images = []
    self.layers = [[[]]]

    class SpriteFlags:
        pass

    self.flags = {}
    sheet_sz = None
    skin_id = 0
    for skin in data["skins"]:
        sheet = None
        for dp in self.app._data_paths:
            try:
                sheet = Image.open(path.join(dp, skin))
            except FileNotFoundError:
                continue
        size = min(*sheet.size)
        # if sizes were not provided, approximate them from the sheet
        if self.size is None:
            self.size = ivec2(size)
        if self.tile_size is None:
            self.tile_size = ivec2(size)
        if self.origin is None:
            self.origin = size / 2
        sheet = sheet.convert("RGBA")
        assert sheet
        if sheet_sz is None:
            sheet_sz = ivec2(sheet.size) / self.tile_size
        self.tile_count = tile_count = sheet_sz.x * sheet_sz.y
        for i in range(tile_count):
            # crop frame from spritesheet
            x = (i % sheet_sz.x) * self.tile_size.x
            y = (i // sheet_sz.x) * self.tile_size.y
            right = x + self.tile_size.x
            bottom = y + self.tile_size.y
            img = sheet.crop((x, y, right, bottom))
            # replace pink pixels with transparency
            px = img.load()
            for yy in range(img.height):
                for xx in range(img.width):
                    if px[xx, yy] == (255, 0, 255, 255):
                        px[xx, yy] = (0, 0, 0, 0)
            images.append(img)
        self.layers[0][skin_id] = images
        skin_id += 1

    # Process and store sequence flags (hflip, once).
    # This generates flipped versions of tiles.
    flipped_images = {}
    tile_id = Wrapper(tile_count)
    self.defaults = None

    def visit(seq, path):
        i = 0
        hflip, vflip = False, False
        for tile in seq:
            if tile == "hflip" or tile == "+hflip":
                hflip = True
            elif tile == "-hflip":
                hflip = False
            elif tile == "vflip" or tile == "+vflip":
                vflip = True
            elif tile == "-vflip":
                vflip = False
            elif tile == "default":
                name = path[-1]
                if name not in self.flags:
                    self.flags[name] = SpriteFlags()
                self.flags[name].default = True
                data["default"] = path
            elif tile == "once":
                name = path[-1]
                if name not in self.flags:
                    self.flags[name] = SpriteFlags()
                self.flags[name].once = True
            elif hflip:
                if tile not in flipped_images:
                    for layer in self.layers:
                        for skin in layer:
                            img = skin[tile].copy()
                            img = img.transpose(Image.FLIP_LEFT_RIGHT)
                            skin.append(img)
                    seq[i] = tile_id()  # change ID to the flipped version
                    flipped_images[tile] = tile_id()
                    tile_id.value += 1
                else:
                    seq[i] = flipped_images[tile]
            i += 1
        # remove flag strings from the sequence in place (the original
        # rebound a local filter object, which had no effect)
        seq[:] = [x for x in seq if not isinstance(x, str)]

    # if a certain sequence is marked default, then fill it in
    recursive_each(list, self.frames, visit)
    self.animation = SpriteAnimation(self)
def get_args(cls, path, filename='args.cson'):
    with open(os.path.join(path, filename), 'r') as f:
        args = cson.load(f)
    return args
def load(self, fin):
    self._data = cson.load(fin)
    self.is_updated = False
    return self
#!/usr/bin/python
import cson
import sys


def lower_case_test(data):
    if not isinstance(data, dict):
        return
    for (key, value) in data.items():
        if key != key.lower():
            print("ERROR: Keys must be in lower case")
            print(filename + " -> " + str(key))
            exit(1)
        lower_case_test(value)


if __name__ == '__main__':
    for filename in sys.argv[1:]:
        try:
            # cson.load() expects a file object, not a path string
            with open(filename, 'rb') as f:
                data = cson.load(f)
        except Exception:
            print("ERROR: " + filename + " not in CSON format")
            exit(2)
        lower_case_test(data)
def matches(name):
    return not sys.argv[1:] or name in sys.argv[1:]


srcdir = os.path.join(os.path.split(__file__)[0], 'test')
for name in os.listdir(srcdir):
    if not name.endswith('.cson'):
        continue
    if not matches(name):
        continue

    total += 1
    cson_fname = os.path.join(srcdir, name)
    with open(cson_fname, 'rb') as fin:
        try:
            c = cson.load(fin)
        except cson.ParseError as e:
            print('{}({},{}): error: {}'.format(name, e.line, e.col, e.msg))
            errors.append(name)
            continue

    json_name = name[:-5] + '.json'
    with open(os.path.join(srcdir, json_name), 'rb') as fin:
        j = json.loads(fin.read().decode('utf-8'))

    if c != j:
        print('error: {}'.format(name))
        print(repr(c))
        print(repr(j))
        errors.append(name)
        continue
def cson_reader(location):
    if os.path.isfile(location):
        with open(location, 'r') as fh:
            return cson.load(fh)
    return 0
def coherent(data):
    # architectures and roles must list the same names
    set1 = set(data['architectures'].keys())
    set2 = set(data['roles'].keys())
    if set1 != set2:
        diff = set1.symmetric_difference(set2)
        for s in diff:
            print("ERROR: " + s + " is not in both architectures and roles in '" + filename + "'")
        exit(2)

    # roles and packages must cover the same role names
    roles = []
    for role in data['roles'].values():
        roles.extend(role)
    set1 = set(roles)
    set2 = set(data['packages'].keys())
    if set1 != set2:
        diff = set1.symmetric_difference(set2)
        for s in diff:
            print("ERROR: " + s + " is not in both roles and packages in '" + filename + "'")
        exit(2)


if __name__ == '__main__':
    # cson.load() expects a file object, not a path string
    with open(sys.argv[1], 'rb') as f:
        systems = cson.load(f)
    for filename in sys.argv[2:]:
        try:
            with open(filename, 'rb') as f:
                data = cson.load(f)
        except Exception:
            print("ERROR: " + filename + " not in CSON format")
            exit(2)
        coherent(data)