def rewrite(name, mfcc_dir, mfcc_scp_dir, var_pool):
    os.makedirs(mfcc_dir, exist_ok=True)
    with open(name + '.json', 'r') as fp:
        utt_id2mfcc = json.load(fp)
    for scp in glob(p_join(mfcc_scp_dir, 'raw_mfcc_{}.*.scp'.format(name))):
        with open(scp, 'r') as scp_f:
            utt_ids = sorted([line.split(' ')[0] for line
                              in scp_f.read().split('\n') if len(line) > 0])
        new_scp = p_join(mfcc_dir, basename(scp))
        new_ark = new_scp[:-4] + '.ark'
        with WriteHelper('ark,scp:{},{}'.format(new_ark, new_scp)) as writer:
            for utt_id in utt_ids:
                try:
                    mfcc_npy = utt_id2mfcc[utt_id]
                    if var_pool == 'average':
                        pooled_mfcc = average_pool(np.load(mfcc_npy))
                    elif var_pool == 'max':
                        pooled_mfcc = max_pool(np.load(mfcc_npy))
                    else:
                        print('unknown var_pool:', var_pool)
                        continue  # nothing to write for an unknown pooling mode
                    writer[utt_id] = pooled_mfcc
                    print(scp, '--', var_pool, '_pooled_mfcc:', pooled_mfcc.shape)
                except KeyError:
                    print('KeyError:', utt_id)
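# `average_pool` and `max_pool` are not defined in this snippet. A minimal
# sketch of what they plausibly do, assuming MFCC arrays of shape
# (frames, coeffs) pooled over the time axis (the names, shapes, and axis
# are assumptions, not taken from the original code):
import numpy as np

def average_pool(mfcc: np.ndarray) -> np.ndarray:
    # collapse the frame axis into one mean vector per utterance
    return mfcc.mean(axis=0)

def max_pool(mfcc: np.ndarray) -> np.ndarray:
    # keep the per-coefficient maximum across all frames
    return mfcc.max(axis=0)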
def cmp_save_potatoes_f(ds_col, test=False):
    start = datetime.now()
    ecs, ds_col_r, csv_fn = conf_cmp_potatoes_f(ds_col, test)
    dn = p_join(DATASETDIR, ds_col_r, EVAL_SUB_DIR)
    os.makedirs(dn, exist_ok=True)
    fn_fdf = p_join(dn, csv_fn)

    # test whether filesystem-wise everything is OK
    fn_fdf_p = Path(fn_fdf)
    fn_fdf_p.touch()
    fn_fdf_p.unlink()

    print(f"\nevaluations will be written to:\n{fn_fdf}")
    df = Ode(ecs).eval_save(fn_fdf)
    df[EvalConf.ODM_COL] = df[EvalConf.ODM_COL].astype(str)
    print(f"wrote file {fn_fdf}")
    Ode.print_df(df)

    end = datetime.now()
    print("runtime:", f"start: {start}", f"end: {end}", f"duration: {end - start}", sep="\n")
    return fn_fdf, df
def draw_table(data_dir, config_dir, out_dir, name):
    yml_config = load_yml_config(p_join(config_dir, f"{name}.yml"))
    data_config, table_config = yml_config.data, yml_config.table

    all_combinations = list(product(data_config.top_levels,
                                    data_config.specializations,
                                    data_config.group_ids,
                                    data_config.variants,
                                    data_config.aggregates))
    group_headers = extract_information(all_combinations, table_config.group_headers)
    subgroup_headers = extract_information(all_combinations, table_config.subgroup_headers)
    subgroup_data = load_subgroup_data(
        data_dir, group_headers, data_config, table_config.subgroup_data
    )
    color_modes = {key: getattr(table_config.color_modes.values, key)
                   for key in group_headers}

    figure = draw_double_header_table(group_headers, subgroup_headers,
                                      subgroup_data, color_modes, data_config)
    makedirs(out_dir, exist_ok=True)
    figure.savefig(p_join(out_dir, f"{name}.png"), dpi=240)
    plt.clf()
    plt.cla()
    plt.close(figure)
def save_students(self, tutorial_id, students):
    directory = ensure_folder_exists(p_join(self._meta_path, "students"))
    path = p_join(directory, f'students_{tutorial_id}.json')
    with open(path, 'w') as fp:
        out_data = [student.to_json_dict() for student in students[tutorial_id]]
        j_dump(out_data, fp, indent=4)
def load_students(self, tutorial_id):
    directory = ensure_folder_exists(p_join(self._meta_path, "students"))
    path = p_join(directory, f'students_{tutorial_id}.json')
    result = list(), "Missing"
    if os.path.exists(path):
        with open(path, 'r') as fp:
            result = [Student.from_json(student) for student in j_load(fp)], "Loaded"
    return result
def save(self, path, id_=None):
    suffix = '' if id_ is None else f'_{id_}'
    torch.save(self.net.state_dict(), p_join(path, f'model{suffix}.pth'))
    state = {'optimizer': self.optimiser.state_dict()}
    torch.save(state, p_join(path, f'state{suffix}.pth'))
def load(self, path, id_=None):
    suffix = '' if id_ is None else f'_{id_}'
    self.net.load_state_dict(torch.load(p_join(path, f'model{suffix}.pth')))
    state = torch.load(p_join(path, f'state{suffix}.pth'))
    self.optimiser.load_state_dict(state['optimizer'])
def save_exchanged_students(self, students, mode):
    if mode == 'imported':
        file_name = "imported_students.json"
    elif mode == 'exported':
        file_name = "exported_students.json"
    else:
        raise ValueError(f"Unknown mode '{mode}' (storage.py: save_exchanged_students)")

    directory = ensure_folder_exists(p_join(self._meta_path, "students"))
    path = p_join(directory, file_name)
    with open(path, 'w') as fp:
        j_dump(students, fp, indent=4)
def __init__(self, data_dir, config_dir, out_dir, file_name):
    self.data_dir = data_dir
    self.config_dir = config_dir
    self.out_dir = out_dir
    self.file_name = file_name
    self._data_file = p_join(data_dir, file_name + ".json")
    self._config_file = p_join(config_dir, file_name + ".yml")
    self._out_file = p_join(out_dir, file_name + ".svg")
    if not p_exists(p_dirname(self._out_file)):
        makedirs(p_dirname(self._out_file))
def subparagraph_classification(training_folder, testing_folder):
    # label subparagraphs as perfect or bad; the training set lives in two folders
    perfect_label = 'perfect'
    bad_label = 'bad'
    # the following line is for cross validation:
    # paragraph_subparagraph_folders = os.path.join(os.path.join(Path_extracted1, 'All_subparagraph_folders'), subject)
    perfect_path = [p_join(training_folder, perfect_label), perfect_label]
    bad_path = [p_join(training_folder, bad_label), bad_label]
    SOURCES = [perfect_path, bad_path]

    subparagraph_classification_result = []
    for paragraph in listdir(testing_folder):
        paragraph = p_join(testing_folder, paragraph)
        classifier_result = supervised_classifier_ngram(SOURCES, paragraph)
        subparagraph_classification_result += classifier_result
    return subparagraph_classification_result
def get_all_compile_commands(path: str) -> GeneratorType:
    """Read the compile commands from compile_commands.json and convert
    relative paths (include dirs and the source file) to absolute ones."""
    db = CompilationDatabase.fromDirectory(path)
    commands = db.getAllCompileCommands()
    for cmd in commands:
        directory = cmd.directory
        arguments = []
        for arg in cmd.arguments:
            # rewrite relative '-Idir' options against the command's directory
            # (the len(arg) > 2 guard avoids an IndexError on a bare '-I')
            if arg.startswith('-I') and len(arg) > 2 and arg[2] != '/':
                arguments.append('-I' + abspath(p_join(directory, arg[2:])))
            else:
                arguments.append(arg)
        # the last argument is the source file; make it absolute as well
        if not isabs(arguments[-1]):
            arguments[-1] = abspath(p_join(directory, arguments[-1]))
        yield arguments
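# The path-rewriting step above, isolated as a self-contained sketch (the
# directory and arguments below are made-up illustrations, not values from
# any real compile_commands.json):
from os.path import abspath, isabs, join as p_join

def absolutize(directory, args):
    out = []
    for arg in args:
        if arg.startswith('-I') and len(arg) > 2 and arg[2] != '/':
            out.append('-I' + abspath(p_join(directory, arg[2:])))
        else:
            out.append(arg)
    if not isabs(out[-1]):
        out[-1] = abspath(p_join(directory, out[-1]))
    return out

# absolutize('/home/user/build', ['cc', '-Iinclude', 'src/main.c'])
# -> ['cc', '-I/home/user/build/include', '/home/user/build/src/main.c']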
def __call__(self, *args):
    exercise_number, debug = self._parse_arguments(args)
    if debug:
        self.printer.confirm("Running in debug mode.")

    finished_folder = self._storage.get_finished_folder(exercise_number)
    feedback_file_name = f"{self._storage.muesli_data.feedback.file_name}.txt"
    meta_file_name = "meta.json"

    with EMailSender(self._storage.email_account, self._storage.my_name) as sender:
        for directory in os.listdir(finished_folder):
            students = list()
            with open(p_join(finished_folder, directory, meta_file_name), 'r', encoding="utf-8") as fp:
                meta = SimpleNamespace(**j_load(fp))
            for muesli_id in meta.muesli_ids:
                try:
                    student = self._storage.get_student_by_muesli_id(muesli_id)
                    students.append(student)
                except ValueError:
                    self.printer.error(f"Did not find student with id {muesli_id}, maybe they left the tutorial?")

            feedback_path = p_join(finished_folder, directory, feedback_file_name)
            message = list()
            message.append("Dieses Feedback ist für:")
            for student in students:
                message.append(f"• {student.muesli_name} ({student.muesli_mail})")
            message.append("")
            message.append("Das Feedback befindet sich im Anhang.")
            message.append("")
            message.append(f"LG {self._storage.my_name_alias}")
            message = "\n".join(message)

            student_names = ', '.join([student.muesli_name for student in students])
            self.printer.inform(f"Sending feedback to {student_names} ... ", end='')
            try:
                sender.send_feedback(
                    students,
                    message,
                    feedback_path,
                    self._storage.muesli_data.exercise_prefix,
                    exercise_number,
                    debug=debug
                )
                self.printer.confirm("[Ok]")
            except BaseException as e:
                self.printer.error(f"[Err] - {e}")
def get_htr_logger(name):
    with open('../configs/logging_config.yaml', 'r') as f:
        make_directories_for_file("../logs/info.log")
        config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
        return logging.getLogger(name)
def load_from_dir(directory) -> list:
    """Load every object serialized with pickle.dump in a directory into a list."""
    ml = []
    for file in os.listdir(directory):
        with open(p_join(directory, file), 'rb') as fp:
            ml.append(pickle.load(fp))
    return ml
def __call__(self, *args):
    exercise_number = int(args[0])
    finished_folder = self._storage.get_finished_folder(exercise_number)
    meta_file_name = "meta.json"

    data = defaultdict(dict)
    for directory in os.listdir(finished_folder):
        with open(p_join(finished_folder, directory, meta_file_name), 'r', encoding="utf-8") as fp:
            meta = SimpleNamespace(**j_load(fp))
        for muesli_id in meta.muesli_ids:
            student = self._storage.get_student_by_muesli_id(muesli_id)
            data[student.tutorial_id][muesli_id] = meta.credits_per_task

    for tutorial_id, student_data in data.items():
        tutorial = self._storage.get_tutorial_by_id(tutorial_id)
        self.printer.inform(
            f"Uploading credits to {tutorial.time} for {len(student_data):>3d} students ... ",
            end=''
        )
        exercise_id = self._muesli.get_exercise_id(
            tutorial_id, self._storage.muesli_data.exercise_prefix, exercise_number
        )
        status, number_of_changes = self._muesli.upload_credits(tutorial_id, exercise_id, student_data)
        if status:
            self.printer.confirm("[Ok]", end="")
            self.printer.inform(f" Changed {number_of_changes:>3d} entries.")
        else:
            self.printer.error("[Err]")
            self.printer.error("Please check your connection state.")
def __init__(self, options_screen, parent=None):
    # load ui
    QtWidgets.QDialog.__init__(self, parent)
    self.ui = uic.loadUi(p_join(dirname(__file__), 'TrainingScreen.ui'), baseinstance=self)

    # copy values
    self.digits, self.questions = (options_screen.DigitsSlider.value(),
                                   options_screen.QuestionsSlider.value())
    self.options_screen = options_screen

    # initialize values
    self.correct_answer = 0
    self.time = 0.0
    self.count_mode = -1
    self.memory_time = MEMORY_DURATION + (self.digits - 6) * MEMORY_ADDITIONAL_TIME
    self.elapsed_times = []

    # make questions
    self.que_gen = (str(random())[2:2 + self.digits] for _ in range(self.questions))
    self.last_que = next(self.que_gen)

    # init and run the update timer
    self.time_update_cycle = 10  # ms
    self.time_updater = QTimer()
    self.time_updater.timeout.connect(self.updateTime)
    self.time_updater.start(self.time_update_cycle)

    # show ui
    self.ui.show()
    self.startQuestion()
def load_student_name_matches(self):
    path = p_join(self._meta_path, '04_student_name_matches.json')
    result = dict()
    if os.path.exists(path):
        with open(path, 'r') as fp:
            result = j_load(fp)
    return result
def save_tutorial_data(self, tutorials):
    path = p_join(self._meta_path, '03_tutorials.json')
    with open(path, 'w') as fp:
        j_dump({k: v.to_json() for k, v in tutorials.items()}, fp, indent=4)
def parsing_cmd(cmd_line, prefix=None, log=None):
    cmds = cmd_line.split()
    with open(log, 'a') as log_f:
        NF = len(cmds)
        if NF == 0:
            print('Null string input for parsing_cmd()', file=log_f)
            raise ValueError('empty command line passed to parsing_cmd()')
        dst = p_join(prefix, cmds[0] + '.wav')
        # remove any stale output file before (re)creating it
        try:
            os.remove(dst)
        except OSError:
            pass
        if NF == 2:
            print('cp ', cmds[1], dst, file=log_f)
            copyfile(cmds[1], dst)
        elif NF > 3:
            if cmds[-1] == "|":
                end = len(cmds) - 2
            elif cmds[-1] == "-|":
                end = len(cmds) - 1
            print(' '.join(cmds[1:end]) + ' ' + dst, file=log_f)
            os.system(' '.join(cmds[1:end]) + ' ' + dst)
def extract_mfcc(name, original_mfcc_dir, mfcc_npy_root_dir):
    utt_id2mfcc = {}
    for scp in glob(p_join(original_mfcc_dir, 'raw_mfcc_{}.*.scp'.format(name))):
        num = scp.split('.')[-2]
        print('extract:', scp)
        mfcc_npy_dir = p_join(mfcc_npy_root_dir, name + '.' + num)
        os.makedirs(mfcc_npy_dir, exist_ok=True)
        with ReadHelper('scp:' + scp) as reader:
            for utt_id, mfcc in reader:
                mfcc_npy = p_join(mfcc_npy_dir, utt_id + '.npy')
                np.save(mfcc_npy, mfcc)
                utt_id2mfcc[utt_id] = mfcc_npy
    return utt_id2mfcc
def load_csv(name, proto=None, evs=1, rep=1):
    if not proto:
        proto = name
    data_frame = pd.read_csv(p_join('dataset', f'logs_{evs}_{rep}', 'pcap', f'{name}.csv'),
                             parse_dates=[0], date_parser=epoch_parser)
    data_frame['Traffic'] = proto
    data_frame.rename(inplace=True, columns={
        'frame.time_epoch': 'datetime',
        '_ws.col.Source': 'source',
        '_ws.col.Destination': 'destination',
        '_ws.col.Protocol': 'protocol',
        '_ws.col.Length': 'length',
        '_ws.col.Info': 'information',
        'eapol.type': 'eapol_type',
        'eth.src': 'eth_src',
    })
    if name == 'sdn-hostapd':
        data_frame.loc[data_frame['protocol'] == 'TCP', 'Traffic'] = 'API'
        data_frame.loc[data_frame['protocol'] == 'HTTP', 'Traffic'] = 'API'
    # scale frame lengths down by a factor of 1000
    data_frame.length = data_frame.length.apply(lambda x: x / 1000)
    return data_frame
def icon_path(exe: path_type, name: str):
    id_file_name = f'{name}.png'
    id_path = p_join(this_dir, 'icons', id_file_name)
    if not p_exists(id_path):
        ico_x = GetSystemMetrics(SM_CXICON)
        try:
            large, small = ExtractIconEx(exe, 0)
        except error:
            return default_icon_path
        if not len(large):
            return default_icon_path
        if len(small):
            DestroyIcon(small[0])

        hdc = CreateDCFromHandle(GetDC(0))
        h_bmp = CreateBitmap()
        h_bmp.CreateCompatibleBitmap(hdc, ico_x, ico_x)
        hdc = hdc.CreateCompatibleDC()
        hdc.SelectObject(h_bmp)
        hdc.DrawIcon((0, 0), large[0])

        bmp_str = h_bmp.GetBitmapBits(True)
        img = Image.frombuffer('RGBA', (32, 32), bmp_str, 'raw', 'BGRA', 0, 1)
        img.save(id_path)
        print(f'Icon of {exe} saved in {id_path}')
    return id_path
def load_subgroup_data(data_dir, group_headers, data_config, subgroup_data):
    variants = data_config.variants
    group_names = {group_id: getattr(data_config.group_names, group_id)
                   for group_id in data_config.group_ids}
    x_range = parse_x(data_config.x)

    result = dict()
    for group_header in group_headers:
        group_data = getattr(subgroup_data.values, group_header)
        rows = list()
        with open(p_join(data_dir, group_data.file), 'r', encoding='utf-8') as fp:
            data = {e["name"]: e for e in j_load(fp)["benchmarks"]}
        for x in x_range:
            tmp = list()
            data_keys = set()
            for variant, group_id in product(variants, data_config.group_ids):
                kwargs = {"x": x, "variant": variant, "group_name": group_names[group_id]}
                data_key = group_data.key.format(**kwargs)
                if data_key not in data_keys:
                    data_keys.add(data_key)
                    tmp.append(data[data_key]["cpu_time"])
            rows.append(tmp)
        result[group_header] = np.round(np.array(rows), 4)
    return result
def sentence_classification(training_folder, testing_folder):
    feedback = 0  # 0 when nothing needs to change; 1 when bad is wrongly classified
    prob_subparagraph = "None"  # problem subparagraph: bad subparagraph wrongly classified as perfect
    prob_col = 2

    # label sentences as perfect or bad; the training set lives in two folders
    perfect_label = 'perfect'
    bad_label = 'bad'
    perfect_path = [p_join(training_folder, 'perfect'), perfect_label]
    bad_path = [p_join(training_folder, 'bad'), bad_label]
    SOURCES = [perfect_path, bad_path]

    # classify every subparagraph in the testing directory
    sentence_classification_result = []
    for subparagraph in listdir(testing_folder):
        subparagraph = p_join(testing_folder, subparagraph)
        classifier_result = supervised_classifier_ngram(SOURCES, subparagraph)
        sentence_classification_result += classifier_result
    return sentence_classification_result
def load_tutorial_data(self):
    path = p_join(self._meta_path, '03_tutorials.json')
    result = dict(), "Missing"
    if os.path.exists(path):
        with open(path, 'r') as fp:
            result = {int(k): Tutorial.from_json(v) for k, v in j_load(fp).items()}, "Loaded"
    return result
def load_tutorial_ids(self, mode='my'):
    path = p_join(self._meta_path, f'02_{mode}_ids.json')
    result = list(), "Missing"
    if os.path.exists(path):
        with open(path, 'r') as fp:
            result = j_load(fp), "Loaded"
    return result
def load_movielens(folder='./ml-100k/', encoding='iso-8859-1'):
    # Get the movie id to title mapping
    movies = {}
    with open(p_join(folder, 'u.item'), encoding=encoding) as f:
        for line in f:
            (m_id, m_title) = line.split('|')[0:2]
            movies[m_id] = m_title

    # Get the preference data
    prefs = {}
    with open(p_join(folder, 'u.data')) as f:
        for line in f:
            (u_id, m_id, rating, ts) = line.split('\t')
            prefs.setdefault(u_id, dict())
            prefs[u_id][movies[m_id]] = float(rating)
    return prefs
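# For reference: load_movielens() returns a nested mapping of the form
# {user_id: {movie_title: rating}}, built from the MovieLens 100k files
# u.item (pipe-separated) and u.data (tab-separated).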
def load_exchanged_students(self, mode):
    if mode == 'imported':
        file_name = "imported_students.json"
    elif mode == 'exported':
        file_name = "exported_students.json"
    else:
        raise ValueError(f"Unknown mode '{mode}' (storage.py: load_exchanged_students)")

    directory = ensure_folder_exists(p_join(self._meta_path, "students"))
    path = p_join(directory, file_name)
    result = list()
    if os.path.exists(path):
        with open(path, 'r') as fp:
            result = j_load(fp)
    return result
def load_presented_scores(self):
    directory = ensure_folder_exists(p_join(self._meta_path, "students"))
    path = p_join(directory, "presented_information.json")
    result = dict(), "Missing"
    if os.path.exists(path):
        with open(path, 'r') as fp:
            data = j_load(fp)
        # JSON keys are always strings; restore the original integer keys
        presented_scores = defaultdict(dict)
        for outer_key, outer_value in data.items():
            for inner_key, inner_value in outer_value.items():
                presented_scores[int(outer_key)][int(inner_key)] = inner_value
        presented_scores = dict(presented_scores)
        result = presented_scores, "Loaded"
    return result
def test_include(self):
    task_template_file = p_join(
        assets_path, r'bpg\insert_trial_products_temp_table.yaml')
    sdt = SDTemplateSQL(task_template_file)
    result = sdt(ns_bucket='0.25', **global_params, **self.params)
    self.assertIsInstance(result, str, "Not a string! What went wrong?")
def _files_glob(path, globs, trim_prefix='', realpath=False):
    if not path:
        path = '.'
    if realpath:
        path = p_realpath(path)
        if trim_prefix:
            trim_prefix = p_realpath(trim_prefix)
    elif trim_prefix:
        trim_prefix = p_normpath(trim_prefix)
    for globlet in globs:
        globresults = glob.glob(p_normpath(p_join(path, globlet)))
        for globresult in globresults:
            prefix = trim_prefix + _dirsep
            if trim_prefix and len(prefix) < len(globresult) and globresult.startswith(prefix):
                result = globresult[len(prefix):]
            else:
                result = globresult
            yield unicode(result)
def run(self):
    # GOTCHA: needed for virtualenvs, @prefix@/etc sometimes doesn't exist (even as a symlink)
    d_mkpath(p_join(_configprefix, project['name']))
    d_install_data.run(self)
    _regex_sub_lines(
        p_join(_configprefix, project['name'], project['name'] + '.conf'),
        (r'^( *)basedir *:.*$', r'\1basedir: ' + _configvars['base']),
        (r'^( *)root *=.*$', r'\1root = %(basedir)s'),
        (r'^( *)lib *=.*$', r'\1lib = ' + re.sub(r'^' + _configvars['base'], r'%(basedir)s',
                                                 p_join(_configpaths['purelib'], project['name']))),
        (r'^( *)scripts *=.*$', r'\1scripts = ' + re.sub(r'^' + _configvars['base'], r'%(basedir)s',
                                                         _configpaths['scripts'])),
        (r'^( *)run *=.*$', r'\1run = ' + re.sub(r'^' + _configvars['base'], r'%(basedir)s',
                                                 p_join(_configvars['base'], 'run', project['name']))),
        (r'^( *)configs *=.*$', r'\1configs = ' + re.sub(r'^' + _configvars['base'], r'%(basedir)s',
                                                         p_join(_configprefix, project['name']))),
        (r'^( *)docs *=.*$', r'\1docs = ' + re.sub(r'^' + _configvars['base'], r'%(basedir)s',
                                                   p_join(_configvars['base'], 'share', 'doc', project['name']))))
def parse(DIR, file_list, plus, minus, cluster_file="no_cluster", init=None):
    '''parse takes:
    DIR = a working directory, usually "/home/slacr/clean_bnc"
    file_list = a list of files to parse, i.e. train, test
    plus, minus = window size parameters
    cluster_file = machine/path-to-python pairs in a file
    init = this function can be called with a data structure to use

    parse populates the data structure pointed to by "data" with collocation
    data for each word in the corpus (i.e. for two words A and B, how often B
    appears within a specified window of A. This may be asymmetric if the
    window is asymmetric).'''
    doccount = 1
    if init is None:
        data = d.boss_ds(cluster_file)
    else:
        data = init
    win_size = plus + minus + 1
    win_target = minus
    # this initial population of the window saves us from having to check
    # whether window[win_target] exists any more
    window = deque([-1 for x in range(win_size)], win_size)
    with open(p_join(DIR, file_list[0])) as fi:
        for line in fi:
            for word in tokenize(line):
                data.wordcount += 1
                # this try/except is a fast way to check whether the word
                # is already in our data structure
                try:
                    host, index = data.hashwords[word]
                except KeyError:
                    host, index = data.add_word(word)
                    data.best_host = (data.best_host + 1) % len(data.channels)
                if window[win_target] != -1:
                    tmpset = set([x for i, x in enumerate(window) if i != win_target])
                    data.update(window[win_target], tmpset)
                window.append((host, index))
    # now we do the rest of the files
    for doc in file_list[1:]:
        doccount += 1
        with open(p_join(DIR, doc)) as fi:
            for line in fi:
                for word in tokenize(line):
                    data.wordcount += 1
                    try:
                        host, index = data.hashwords[word]
                    except KeyError:
                        host, index = data.add_word(word)
                        data.best_host = (data.best_host + 1) % len(data.channels)
                    tmpset = set([x for i, x in enumerate(window) if i != win_target])
                    data.update(window[win_target], tmpset)
                    # move the window one word forward
                    window.append((host, index))
    # bookkeeping: ensure the last words are accounted for as target words
    while window[win_target] != -1:
        data.wordcount += 1
        tmpset = set([x for i, x in enumerate(window) if i != win_target and x != -1])
        data.update(window[win_target], tmpset)
        window.append(-1)
    return data
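# A self-contained sketch of the sliding-window co-occurrence counting that
# parse() performs, using plain dicts instead of the distributed d.boss_ds
# structure (the function name and defaults are assumptions for illustration):
from collections import defaultdict, deque

def cooccurrence_counts(words, plus=2, minus=2):
    win_size = plus + minus + 1
    win_target = minus  # `minus` preceding slots, the target, `plus` following slots
    window = deque([None] * win_size, maxlen=win_size)
    counts = defaultdict(lambda: defaultdict(int))
    # pad with None so every word passes through the target slot exactly once
    for word in list(words) + [None] * (win_size - win_target):
        target = window[win_target]
        if target is not None:
            for i, ctx in enumerate(window):
                if i != win_target and ctx is not None:
                    counts[target][ctx] += 1
        window.append(word)
    return counts

# e.g. cooccurrence_counts("the cat sat on the mat".split())['sat']
# counts every word within two positions on either side of "sat".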
# Django settings for ccms project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
from os.path import join as p_join, dirname, realpath

BASE_DIR = realpath(p_join(dirname(__file__), '..'))

ADMINS = (
    # ('Your Name', '*****@*****.**'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',      # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': p_join(BASE_DIR, 'development.db'),  # Or path to database file if using sqlite3.
        'USER': '',                                  # Not used with sqlite3.
        'PASSWORD': '',                              # Not used with sqlite3.
        'HOST': '',                                  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                                  # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
def run(self):
    d_install_scripts.run(self)
    _regex_sub_lines(
        p_join(_configpaths['scripts'], project['name']),
        (r'@configfile@', '"' + p_join(_configprefix, project['name'], project['name'] + r'.conf"')))
def _find_dirs(topdir='lib'):
    if p_isdir(topdir):
        for _dir in os.walk(topdir):
            if p_isfile(p_join(_dir[0], '__init__.py')):
                yield _dir[0]
from distutils.dist import DistributionMetadata

## GLOBAL VARS SETUP ##

# iterate to find '/' or the system equivalent
_newdirsep = p_realpath('.')
_dirsep = ''
while _newdirsep != _dirsep:
    _dirsep = _newdirsep
    _newdirsep = p_dirname(_dirsep)
_dirsep = p_splitdrive(_dirsep)[1]
del _newdirsep

_projectpath = p_realpath('.')
_configvars = sysconfig.get_config_vars()
_configpaths = sysconfig.get_paths()
if p_basename(_configpaths['data']) == 'usr':
    # GOTCHA: '[path]/usr', not only '/usr', to allow for virtualenvs...
    _configprefix = p_normpath(p_join(_configpaths['data'], p_pardir, 'etc'))  # "[path]/usr" => "[path]/etc" ("[path]/usr/etc", FHS-friendly)
else:
    _configprefix = p_join(_configpaths['data'], 'etc')  # "[path]/[something_else]" => "[path]/[something_else]/etc"
_dirsep, _projectpath, _configprefix = unicode(_dirsep), unicode(_projectpath), unicode(_configprefix)

## TO EDIT ##
project = {
    'description': 'Async server micro-framework for control freaks',
    'hosttype': 'github',
    'repotype': 'git',
    'username': '******',
    'author': 'Rowan Thorpe',
    'author_email': '*****@*****.**',
    'classifiers': [
        'Development Status :: 3 - Alpha',
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Sam Wilson', '*****@*****.**'),
)

MANAGERS = ADMINS

import dj_database_url
DATABASES = {'default': dj_database_url.config()}

from os.path import join as p_join, dirname, realpath
BASE_DIR = realpath(p_join(dirname(__file__), '..'))

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import sys
from os.path import dirname, join as p_join
sys.path.append(p_join(dirname(sys.argv[0]), '..'))

from sippy.Core.EventDispatcher import ED2
from sippy.Time.MonoTime import MonoTime
from sippy.Time.Timeout import Timeout
from sippy.Signal import Signal
from sippy.SipFrom import SipFrom
from sippy.SipTo import SipTo
from sippy.SipCiscoGUID import SipCiscoGUID
from sippy.UA import UA
from sippy.CCEvents import CCEventRing, CCEventConnect, CCEventDisconnect, CCEventTry, CCEventUpdate, CCEventFail
from sippy.UasStateTrying import UasStateTrying
from sippy.UasStateRinging import UasStateRinging
from sippy.UaStateDead import UaStateDead
from sippy.SipConf import SipConf
from sippy.SipHeader import SipHeader
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('post_name', default=None, nargs='?',
                        help="Name of Post. Ex: 'My Awesome Post'. "
                             "If not provided, the script will prompt for it")
    parser.add_argument('-r', '--root', default=ROOT,
                        help="Root of jekyll blog. Ex: /home/bbkane/bbkane.github.io")
    parser.add_argument('-ir', '--image_root', default=IMG_ROOT,
                        help="Folder in jekyll blog that contains images. Ex: img")
    parser.add_argument('-pr', '--post_root', default=POST_ROOT,
                        help="Folder in jekyll blog that contains posts. Ex: _posts")
    parser.add_argument('-rr', '--refresh_rate', default=REFRESH_RATE, type=int,
                        help="Number of seconds per check. Ex: 1")
    parser.add_argument('-e', '--editor', default=EDITOR,
                        help="Editor to open new post in. Must be added to path. Ex: vim."
                             " The special value NONE can be provided to not use an editor."
                             " If not provided, we'll try to guess a default.")
    parser.add_argument('-d', '--date', default=datetime.datetime.today().strftime('%Y-%m-%d'),
                        help="date of the post (of the form YYYY-MM-DD). Defaults to current date")
    parser.add_argument('-ni', '--no-images', action='store_true',
                        help="Don't poll the image dir for new images to convert to blog post links")
    parser.add_argument('-elp', '--edit-last-post', action='store_true',
                        help="Re-open the last post edited")
    args = parser.parse_args()

    # Handle Ctrl-C nicely
    import signal

    def exit_(signum, frame):
        raise SystemExit('\nExiting...\n')
    signal.signal(signal.SIGINT, exit_)

    post_dir = p_join(args.root, args.post_root)

    if args.edit_last_post:
        # find the most recently modified post
        # NOTE: this breaks on an empty post_dir
        last_change_time = 0.0
        last_post = None
        for entry in os.scandir(post_dir):
            entry_last_change_time = entry.stat().st_mtime
            if entry.is_file() and entry_last_change_time > last_change_time:
                last_change_time = entry_last_change_time
                last_post = entry.name
        args.post_name = last_post
        post_path = p_join(post_dir, args.post_name)
        post_title = args.post_name[:-3]  # remove the .md extension
    else:
        if not args.post_name:
            args.post_name = input("Enter the name of your post to create: ")
        # jekyllify the filename and create the post with layout info
        today = args.date
        post_title = today + '-' + args.post_name.replace(' ', '-')
        post_path = p_join(post_dir, post_title + '.md')
        if not os.path.isfile(post_path):
            print('Creating new blog:', post_path)
            with open(post_path, 'w') as post:
                write = lambda s: print(s, file=post)  # flake8: noqa
                write('---')
                write('layout: default')
                write('title: ' + args.post_name)
                write('---')
                write('')
        else:
            print(post_path, 'already created.')

    # erase empty child folders in img_dir
    img_dir = p_join(args.root, args.image_root)
    for entry in os.scandir(img_dir):
        dir_path = p_join(img_dir, entry.name)
        if entry.is_dir() and not os.listdir(dir_path):
            print('Erasing empty dir: ', dir_path)
            os.rmdir(dir_path)

    # Opening the editor blocks the rest of the script if it's a terminal app,
    # so only open it if we're not using images
    if args.editor and args.editor != 'NONE':
        print("Copy-paste the following to open the post:")
        print("\n", ' ' * 8, args.editor, post_path, "\n")
        if args.no_images:
            try:
                retcode = subprocess.call([args.editor, post_path], shell=False)
                if retcode != 0:
                    print("Child was terminated by signal", retcode, file=sys.stderr)
                else:
                    print(args.editor, 'opened', post_path, 'successfully')
            except OSError as e:
                print("Execution failed:", e, file=sys.stderr)

    # create post_img_dir
    post_img_dir = p_join(img_dir, post_title)
    if not args.no_images:
        print('Creating new image dir:', post_img_dir)
        os.makedirs(post_img_dir, exist_ok=True)
        if PYPERCLIP_INSTALLED:
            print('Watching', post_img_dir, 'for changes to make links from them. Ctrl-C quits.')
            # keep checking post_img_dir for changes
            img_list = set(os.listdir(post_img_dir))
            while True:
                time_start = time.time()
                new_img_list = set(os.listdir(post_img_dir))
                # make new files in the dir into links for the clipboard
                if new_img_list != img_list:
                    links_to_create = new_img_list - img_list
                    for img in links_to_create:
                        parts = ('')
                        link = '/'.join(parts)
                        print('Copying link to clipboard:', link)
                        pyperclip.copy(link)
                    img_list = new_img_list
                # sleep until we need to refresh
                duration = time.time() - time_start
                if duration < args.refresh_rate:
                    time.sleep(args.refresh_rate - duration)
        else:
            print("Install pyperclip for clipboard support")