def test_process_ini_file(tmpdir, valid_settings, invalid_settings):
    """Verify that process_ini_file() maps ini options onto settings.*."""
    from tokendito import helpers, settings

    def render(options):
        # Render a mapping as "key = value" ini lines.
        return ''.join('{} = {}\n'.format(k, v) for k, v in options.items())

    # Assemble a mock config: valid and invalid options under [pytest],
    # plus a decoy [pytest_end] section that must be ignored.
    data = ('[default]\nokta_username = pytest\n\n[pytest]\n'
            + render(valid_settings)
            + render(invalid_settings)
            + '\n[pytest_end]\n'
            + render(invalid_settings))

    # Patching builtins.open() is only possible on Python 3.7+, so write a
    # real file instead; tmpdir keeps the last few runs around for inspection.
    path = tmpdir.mkdir('pytest').join('pytest_tokendito.ini')
    path.write(data)

    # A missing section must terminate with exit status 2.
    with pytest.raises(SystemExit) as err:
        helpers.process_ini_file(path, 'expected_failure')
    assert err.value.code == 2

    helpers.process_ini_file(path, 'pytest')
    # Every valid option was applied to settings...
    for key_name in valid_settings:
        assert getattr(settings, key_name) == valid_settings[key_name]
    # ...and no invalid option leaked through.
    for key_name in invalid_settings:
        assert getattr(settings, key_name, 'not_found') == 'not_found'
def make_file_tree(spec, path):
    """Materialize *spec* on disk under *path*.

    A ``str`` spec becomes a file containing that string; a ``dict`` spec
    becomes a directory whose entries are created recursively (keys are
    child names, values are nested specs).

    Args:
        spec: str (file contents) or dict (directory layout).
        path: py.path.local-style object supporting write/mkdir/join.

    Raises:
        TypeError: if *spec* is neither str nor dict. (Previously such
            specs were silently ignored, hiding malformed fixtures.)
    """
    if isinstance(spec, str):
        path.write(spec)
    elif isinstance(spec, dict):
        path.mkdir()
        for name, child in spec.items():
            make_file_tree(child, path.join(name))
    else:
        raise TypeError(
            'spec must be str or dict, got %s' % type(spec).__name__)
def encrypt_file(self, file_name):
    """Encrypt *file_name* in place.

    Writes the ciphertext to ``<file_name>.enc`` using ``self.encrypt``
    with ``self.key``, then deletes the original file.
    """
    with open(file_name, 'rb') as source:
        plaintext = source.read()
    ciphertext = self.encrypt(plaintext, self.key)
    with open(file_name + ".enc", 'wb') as target:
        target.write(ciphertext)
    os.remove(file_name)
def test_download_urls_to_files_if_not_exist__when_files_not_exist(
        mapping, tmpdir):
    """Exercise the bulk helper with targets rooted in tmpdir.

    NOTE(review): despite the "files_not_exist" name, every target file IS
    pre-created with 'content' before the call — confirm the fixture's
    intent against the single-file test variant.
    """
    rewritten = {}
    for name, url in mapping.items():
        target = tmpdir.join(name)
        target.write('content')
        rewritten[str(target)] = url
    download_urls_to_files_if_not_exist(rewritten)
def test_download_url_to_file_if_not_exists__when_file_exists(
        url, file_name, tmpdir):
    """An already-existing file must be left untouched by the helper."""
    target = tmpdir.join(file_name)
    target.write('content')
    target_str = str(target)
    assert os.path.isfile(target_str)
    download_url_to_file_if_not_exists(url, target_str)
    # The pre-existing content survived — nothing was downloaded over it.
    assert target.read() == 'content'
def logo_interpreter(lexer):
    """Run the LOGO lexer and dump its canvas drawing into an HTML file.

    Wraps ``lexer.izvrši()`` output in an HTML5-canvas scaffold and appends
    it to the first file name of the form ``1*javascript.html`` not yet
    present in the current directory.
    """
    # HTML scaffolding placed before and after the generated draw commands.
    header = '''\
<!DOCTYPE html><html><head><title>HTML5 Canvas For Absolute Beginners | onlyWebPro.com </title><script type=\"text/javascript\"> function drawShape() {var myCanvas = document.getElementById(\"myCanvas\"); var ctx = myCanvas.getContext(\"2d\"); ctx.beginPath(); ctx.lineTo(0,0);'''
    footer = '''\
ctx.stroke(); }</script> </head> <body onload=\"drawShape()\"><canvas id=\"myCanvas\" width=\"300\" height=\"300\"> </canvas></body></html>'''
    print("LOGO")
    content = header + lexer.izvrši() + footer
    # Prefix '1' until we land on a file name that is not taken yet.
    out_name = 'javascript.html'
    while out_name and os.path.isfile(out_name):
        out_name = str(1) + out_name
    with open(out_name, 'a') as out:
        out.write(content)
def file_entry_apply(self, hooks_dir):
    """Create this entry inside *hooks_dir* with its contents and mode."""
    entry = hooks_dir.join(self.name)
    entry.write(self.contents)
    entry.chmod(self.mode)
def test_not_dir(tmpdir):
    """A plain file path must be rejected with NotADirectoryError."""
    file_path = tmpdir.join('not-dir.txt')
    file_path.write('')
    with raises(NotADirectoryError):
        FileSystemRepository(str(file_path))
if sensor_id.isdigit(): current_profile.set('Sensors', 'ibm_thermal_sensor_' + str(sensor_id), str( {'name': nname, 'triggers': ntp})) else: current_profile.set('Sensors', str(sensor_id), str( {'name': nname, 'scaling': self.sensor_scalings[sensor_id], 'triggers': ntp})) except Exception, e: print 'Error writing current profile' print e return False try: if not is_a_string_buffer: path = open(path, 'w') path.write( '# This file contains a fan profile for tpfancod') path.write('\n\n\n') current_profile.write(path) if is_a_string_buffer: self.profile_as_string = path.getvalue() except Exception, e: print 'Error writing profile file: %s' % path print e return False return True def load_config(self, settings_from_config): """apply settings from a config""" self.verify_config(settings_from_config) if settings_from_config['status']:
def run(self, path=None, predy_resid=None):
    """Run the configured spatial regression and write its summaries.

    Assembles weights, y/x/ye/h arrays and config options from
    ``self.data``, dispatches to ``Spmodel``, writes each result summary
    to ``path`` and returns the list of summaries.

    NOTE(review): Python 2 code (print statements). ``path`` defaults to
    None but is written to unconditionally at the end — presumably callers
    always pass an open, writable file-like object; TODO confirm.
    Returns False if self.verify() fails, None if no 'fname' is present.
    """
    print self.verify()
    if not self.verify()[0]:
        return False
    data = self.data
    print data
    # Configure pysal's global missing-value sentinel from the model config.
    if self.data['config']['other_missingValueCheck']:
        pysal.MISSINGVALUE = self.data['config']['other_missingValue']
    else:
        pysal.MISSINGVALUE = None
    # Build up args for dispatcher
    # weights: only enabled entries are used; names are re-attached to the
    # underlying weight objects so output labelling matches the UI.
    w_names = [w.name for w in data['mWeights'] if w.enabled]
    w_list = [w.w for w in data['mWeights'] if w.enabled]
    for w, name in zip(w_list, w_names):
        w.name = name
    wk_names = [w.name for w in data['kWeights'] if w.enabled]
    wk_list = [w.w for w in data['kWeights'] if w.enabled]
    for w, name in zip(wk_list, wk_names):
        w.name = name
    # Open the data source; csv needs universal-newline mode.
    if 'fname' in self.data:
        fileType = self.data['fname'].rsplit('.')[-1].lower()
        self.fileType = fileType
        if fileType == 'csv':
            db = pysal.open(data['fname'], 'rU')
        else:
            db = pysal.open(data['fname'], 'r')
    else:
        return None
    # y: dependent variable, shaped (n, 1)
    name_y = data['spec']['y']
    y = np.array([self.get_col(db, name_y)]).T
    # x: exogenous regressors, shaped (n, k)
    x = []
    x_names = data['spec']['X']
    for x_name in x_names:
        x.append(self.get_col(db, x_name))
    x = np.array(x).T
    # YE: endogenous regressors
    ye = []
    ye_names = data['spec']['YE']
    for ye_name in ye_names:
        ye.append(self.get_col(db, ye_name))
    ye = np.array(ye).T
    # H: instruments
    h = []
    h_names = data['spec']['H']
    for h_name in h_names:
        h.append(self.get_col(db, h_name))
    h = np.array(h).T
    mtypes = {0: 'Standard', 1: 'Spatial Lag',
              2: 'Spatial Error', 3: 'Spatial Lag+Error'}
    model_type = mtypes[data['modelType']['mType']]
    print model_type
    # estimation methods
    # method_types = {0: 'gm', 1: 'ml'}
    method_types = {0: 'ols', 1: 'gm', 2: 'ml'}
    method = method_types[data['modelType']['method']]
    print method
    # R: optional regimes column
    if data['spec']['R']:
        name_r = data['spec']['R']
        r = self.get_col(db, name_r)
    else:
        name_r = None
        r = None
    # T: optional time column
    if data['spec']['T']:
        name_t = data['spec']['T']
        t = self.get_col(db, name_t)
    else:
        name_t = None
        t = None
    # These options are not available yet....
    s = None
    name_s = None
    config = data['config']
    # LM diagnostics only apply with model weights on Standard/Lag models.
    if self.getMWeightsEnabled() and model_type in \
            ['Standard', 'Spatial Lag']:
        LM_TEST = True
    else:
        LM_TEST = False
    print w_list, wk_list
    fname = os.path.basename(data['fname'])
    results = Spmodel(
        name_ds=fname,
        w_list=w_list,
        wk_list=wk_list,
        y=y,
        name_y=name_y,
        x=x,
        name_x=x_names,
        ye=ye,
        name_ye=ye_names,
        h=h,
        name_h=h_names,
        r=r,
        name_r=name_r,
        s=s,
        name_s=name_s,
        t=t,
        name_t=name_t,
        model_type=model_type,
        # data['modelType']['endogenous'],
        # data['modelType']['spatial_tests']['lm'],
        spat_diag=LM_TEST,
        white=data['modelType']['error']['white'],
        hac=data['modelType']['error']['hac'],
        kp_het=data['modelType']['error']['het'],
        # config.....
        sig2n_k_ols=config['sig2n_k_ols'],
        sig2n_k_tsls=config['sig2n_k_2sls'],
        sig2n_k_gmlag=config['sig2n_k_gmlag'],
        max_iter=config['gmm_max_iter'],
        stop_crit=config['gmm_epsilon'],
        inf_lambda=config['gmm_inferenceOnLambda'],
        comp_inverse=config['gmm_inv_method'],
        step1c=config['gmm_step1c'],
        instrument_lags=config['instruments_w_lags'],
        lag_user_inst=config['instruments_lag_q'],
        vc_matrix=config['output_vm_summary'],
        predy_resid=predy_resid,
        ols_diag=config['other_ols_diagnostics'],
        moran=config['other_residualMoran'],
        white_test=config['white_test'],
        regime_err_sep=config['regimes_regime_error'],
        regime_lag_sep=config['regimes_regime_lag'],
        cores=config['other_numcores'],
        ml_epsilon=config['ml_epsilon'],
        ml_method=config['ml_method'],
        ml_diag=config['ml_diagnostics'],
        method=method
    ).output
    print results
    # NOTE(review): this loop rebinds `r` (previously the regimes column).
    for r in results:
        path.write(r.summary)
        path.write('\n\n\n')
    return [r.summary for r in results]
# NOTE(review): stray opening triple-quote below — it begins a region that
# is truncated in this chunk; left byte-identical.
"""
import os
import os.path

# Collect every directory under C:/main that contains at least one .py file
# and write the deduplicated, sorted, './'-stripped paths to path.txt
# (opened in the directory the script was started from).
found_dirs = []
with open("path.txt", 'w') as path:
    os.chdir("C:/main")
    # BUG FIX: os.walk("") walks nothing (scandir('') fails and is swallowed);
    # the original i[2:] slicing shows os.walk(".") was intended, since it
    # produces './sub/dir' entries.
    for current_dir, dirs, files in os.walk("."):
        if any(name.endswith('.py') for name in files):
            found_dirs.append(current_dir)
    for entry in sorted(set(found_dirs)):
        # Drop the leading "./" produced by os.walk(".").
        path.write(entry[2:] + '\n')
# The redundant explicit close() after the `with` block was removed: the
# context manager already closed the file.
def run(self, path=None, predy_resid=None):
    """Run the configured spatial regression and write its summaries.

    Assembles weights, y/x/ye/h arrays and config options from
    ``self.data``, dispatches to ``Spmodel``, writes each result summary
    to ``path`` and returns the list of summaries.

    NOTE(review): Python 2 code (print statements). ``path`` defaults to
    None but is written to unconditionally at the end — presumably callers
    always pass an open, writable file-like object; TODO confirm.
    Returns False if self.verify() fails, None if no 'fname' is present.
    """
    print self.verify()
    if not self.verify()[0]:
        return False
    data = self.data
    print data
    # Configure pysal's global missing-value sentinel from the model config.
    if self.data['config']['other_missingValueCheck']:
        pysal.MISSINGVALUE = self.data['config']['other_missingValue']
    else:
        pysal.MISSINGVALUE = None
    # Build up args for dispatcher
    # weights: only enabled entries are used; names are re-attached to the
    # underlying weight objects so output labelling matches the UI.
    w_names = [w.name for w in data['mWeights'] if w.enabled]
    w_list = [w.w for w in data['mWeights'] if w.enabled]
    for w, name in zip(w_list, w_names):
        w.name = name
    wk_names = [w.name for w in data['kWeights'] if w.enabled]
    wk_list = [w.w for w in data['kWeights'] if w.enabled]
    for w, name in zip(wk_list, wk_names):
        w.name = name
    # Open the data source; csv needs universal-newline mode.
    if 'fname' in self.data:
        fileType = self.data['fname'].rsplit('.')[-1].lower()
        self.fileType = fileType
        if fileType == 'csv':
            db = pysal.open(data['fname'], 'rU')
        else:
            db = pysal.open(data['fname'], 'r')
    else:
        return None
    # y: dependent variable, shaped (n, 1)
    name_y = data['spec']['y']
    y = np.array([self.get_col(db, name_y)]).T
    # x: exogenous regressors, shaped (n, k)
    x = []
    x_names = data['spec']['X']
    for x_name in x_names:
        x.append(self.get_col(db, x_name))
    x = np.array(x).T
    # YE: endogenous regressors
    ye = []
    ye_names = data['spec']['YE']
    for ye_name in ye_names:
        ye.append(self.get_col(db, ye_name))
    ye = np.array(ye).T
    # H: instruments
    h = []
    h_names = data['spec']['H']
    for h_name in h_names:
        h.append(self.get_col(db, h_name))
    h = np.array(h).T
    mtypes = {
        0: 'Standard',
        1: 'Spatial Lag',
        2: 'Spatial Error',
        3: 'Spatial Lag+Error'
    }
    model_type = mtypes[data['modelType']['mType']]
    print model_type
    # estimation methods
    # method_types = {0: 'gm', 1: 'ml'}
    method_types = {0: 'ols', 1: 'gm', 2: 'ml'}
    method = method_types[data['modelType']['method']]
    print method
    # R: optional regimes column
    if data['spec']['R']:
        name_r = data['spec']['R']
        r = self.get_col(db, name_r)
    else:
        name_r = None
        r = None
    # T: optional time column
    if data['spec']['T']:
        name_t = data['spec']['T']
        t = self.get_col(db, name_t)
    else:
        name_t = None
        t = None
    # These options are not available yet....
    s = None
    name_s = None
    config = data['config']
    # LM diagnostics only apply with model weights on Standard/Lag models.
    if self.getMWeightsEnabled() and model_type in \
            ['Standard', 'Spatial Lag']:
        LM_TEST = True
    else:
        LM_TEST = False
    print w_list, wk_list
    fname = os.path.basename(data['fname'])
    results = Spmodel(
        name_ds=fname,
        w_list=w_list,
        wk_list=wk_list,
        y=y,
        name_y=name_y,
        x=x,
        name_x=x_names,
        ye=ye,
        name_ye=ye_names,
        h=h,
        name_h=h_names,
        r=r,
        name_r=name_r,
        s=s,
        name_s=name_s,
        t=t,
        name_t=name_t,
        model_type=model_type,
        # data['modelType']['endogenous'],
        # data['modelType']['spatial_tests']['lm'],
        spat_diag=LM_TEST,
        white=data['modelType']['error']['white'],
        hac=data['modelType']['error']['hac'],
        kp_het=data['modelType']['error']['het'],
        # config.....
        sig2n_k_ols=config['sig2n_k_ols'],
        sig2n_k_tsls=config['sig2n_k_2sls'],
        sig2n_k_gmlag=config['sig2n_k_gmlag'],
        max_iter=config['gmm_max_iter'],
        stop_crit=config['gmm_epsilon'],
        inf_lambda=config['gmm_inferenceOnLambda'],
        comp_inverse=config['gmm_inv_method'],
        step1c=config['gmm_step1c'],
        instrument_lags=config['instruments_w_lags'],
        lag_user_inst=config['instruments_lag_q'],
        vc_matrix=config['output_vm_summary'],
        predy_resid=predy_resid,
        ols_diag=config['other_ols_diagnostics'],
        moran=config['other_residualMoran'],
        white_test=config['white_test'],
        regime_err_sep=config['regimes_regime_error'],
        regime_lag_sep=config['regimes_regime_lag'],
        cores=config['other_numcores'],
        ml_epsilon=config['ml_epsilon'],
        ml_method=config['ml_method'],
        ml_diag=config['ml_diagnostics'],
        method=method).output
    print results
    # NOTE(review): this loop rebinds `r` (previously the regimes column).
    for r in results:
        path.write(r.summary)
        path.write('\n\n\n')
    return [r.summary for r in results]
# NOTE(review): stray opening triple-quote below — it begins a region that
# is truncated in this chunk; left byte-identical.
"""
def note(tmpdir):
    """Fixture: a Note backed by a two-line text file in tmpdir."""
    note_file = tmpdir.join('alpha.txt')
    note_file.write('alpha\nbravo')
    return Note(note_file)
def run(self, path=None, predy_resid=None):
    """Run the configured spatial regression (incl. SUR/panel forms).

    Extends the basic dispatcher with comma-separated multi-equation specs
    ("HR60,HR70") and Space/Time panel identifiers, rebuilding y/x/ye/h via
    sur_dictxy/sur_dictZ when either form is detected, then dispatches to
    ``Spmodel`` and writes each result summary to ``path``.

    NOTE(review): Python 2 code (print statements, integer division in the
    regimes shrink below). ``path`` defaults to None but is written to
    unconditionally at the end — presumably callers always pass an open,
    writable file-like object; TODO confirm.
    Returns False if self.verify() fails, None if no 'fname' is present.
    """
    print self.verify()
    if not self.verify()[0]:
        return False
    data = self.data
    print data
    # Configure pysal's global missing-value sentinel from the model config.
    if self.data['config']['other_missingValueCheck']:
        pysal.MISSINGVALUE = self.data['config']['other_missingValue']
    else:
        pysal.MISSINGVALUE = None
    # Build up args for dispatcher
    # weights: only enabled entries are used; names are re-attached to the
    # underlying weight objects so output labelling matches the UI.
    w_names = [w.name for w in data['mWeights'] if w.enabled]
    w_list = [w.w for w in data['mWeights'] if w.enabled]
    for w, name in zip(w_list, w_names):
        w.name = name
    wk_names = [w.name for w in data['kWeights'] if w.enabled]
    wk_list = [w.w for w in data['kWeights'] if w.enabled]
    for w, name in zip(wk_list, wk_names):
        w.name = name
    # Open the data source; csv needs universal-newline mode.
    if 'fname' in self.data:
        fileType = self.data['fname'].rsplit('.')[-1].lower()
        self.fileType = fileType
        if fileType == 'csv':
            db = pysal.open(data['fname'], 'rU')
        else:
            db = pysal.open(data['fname'], 'r')
    else:
        return None
    # y: possibly several dependent variables (SUR); name_y keeps the
    # comma-joined form so the SUR branch below can detect it.
    y_names = data['spec']['y']
    name_y = ','.join(y_names)
    y = np.array([self.get_col(db, name) for name in y_names]).T
    # x: a comma in any name means SUR — defer to sur_dictxy below.
    x = []
    x_names = data['spec']['X']
    for x_name in x_names:
        if x_name.find(',') >= 0:
            x = None  # get data using sur_dictxy
            #x.append([self.get_col(db, name) for name in x_name.split(',')])
        else:
            x.append(self.get_col(db, x_name))
    # NOTE(review): "x != None" should be "x is not None"; left unchanged.
    if x != None:
        x = np.array(x).T
    # YE: endogenous regressors (comma form kept as nested lists for SUR)
    ye = []
    ye_names = data['spec']['YE']
    for ye_name in ye_names:
        if ye_name.find(',') >= 0:
            ye.append(
                [self.get_col(db, name) for name in ye_name.split(',')])
        else:
            ye.append(self.get_col(db, ye_name))
    ye = np.array(ye).T
    # H: instruments (comma form kept as nested lists for SUR)
    h = []
    h_names = data['spec']['H']
    for h_name in h_names:
        if h_name.find(',') >= 0:
            h.append(
                [self.get_col(db, name) for name in h_name.split(',')])
        else:
            h.append(self.get_col(db, h_name))
    h = np.array(h).T
    mtypes = {
        0: 'Standard',
        1: 'Spatial Lag',
        2: 'Spatial Error',
        3: 'Spatial Lag+Error'
    }
    model_type = mtypes[data['modelType']['mType']]
    print model_type
    # estimation methods
    # method_types = {0: 'gm', 1: 'ml'}
    method_types = {0: 'ols', 1: 'gm', 2: 'ml'}
    method = method_types[data['modelType']['method']]
    print method
    # R: optional regimes column
    if data['spec']['R']:
        name_r = data['spec']['R']
        r = self.get_col(db, name_r)
    else:
        name_r = None
        r = None
    # T: optional time column
    if data['spec']['T']:
        name_t = data['spec']['T']
        t = self.get_col(db, name_t)
    else:
        name_t = None
        t = None
    # S: optional space column
    if data['spec']['S']:
        name_s = data['spec']['S']
        s = self.get_col(db, name_s)
    else:
        s = None
        name_s = None
    # SUR: get data (1) HR60,HR70 (2) using Time, Space
    if name_y.find(',') >= 0:
        # Wide SUR form: comma-separated variable lists per equation.
        y_var0 = name_y.split(',')
        x_var0 = [x_name.split(',') for x_name in x_names]
        y, x, name_y, x_names = sur_dictxy(db, y_var0, x_var0)
        yend_var1 = [name.split(',') for name in ye_names]
        ye, ye_names = sur_dictZ(db, yend_var1)
        q_var1 = [name.split(',') for name in h_names]
        h, h_names = sur_dictZ(db, q_var1)
        # regimes shrinks to n
        #r = r[ : len(r) / len(y)]
    elif name_s and name_t and len(name_s) > 0 and len(name_t) > 0:
        # Panel ("plm") form keyed by Space and Time identifier columns.
        y, x, name_y, x_names = sur_dictxy(db, [name_y], [x_names],
                                           space_id=[name_s],
                                           time_id=[name_t])
        if len(ye_names) > 0:
            yend_var1 = [name.split(',') for name in ye_names]
            ye, ye_names = sur_dictZ(db, yend_var1, form="plm",
                                     space_id=[name_s], time_id=[name_t])
        if len(h_names) > 0:
            q_var1 = [name.split(',') for name in h_names]
            h, h_names = sur_dictZ(db, q_var1, form="plm",
                                   space_id=[name_s], time_id=[name_t])
        # regimes shrinks to n
        # NOTE(review): Python 2 integer division; under Python 3 this
        # would need len(r) // len(y).
        if r:
            r = r[:len(r) / len(y)]
    config = data['config']
    # LM diagnostics only apply with model weights on Standard/Lag models.
    if self.getMWeightsEnabled() and model_type in \
            ['Standard', 'Spatial Lag']:
        LM_TEST = True
    else:
        LM_TEST = False
    print w_list, wk_list
    fname = os.path.basename(data['fname'])
    results = Spmodel(
        name_ds=fname,
        w_list=w_list,
        wk_list=wk_list,
        y=y,
        name_y=name_y,
        x=x,
        name_x=x_names,
        ye=ye,
        name_ye=ye_names,
        h=h,
        name_h=h_names,
        r=r,
        name_r=name_r,
        s=s,
        name_s=name_s,
        t=t,
        name_t=name_t,
        model_type=model_type,
        # data['modelType']['endogenous'],
        # data['modelType']['spatial_tests']['lm'],
        spat_diag=LM_TEST,
        white=data['modelType']['error']['white'],
        hac=data['modelType']['error']['hac'],
        kp_het=data['modelType']['error']['het'],
        # config.....
        sig2n_k_ols=config['sig2n_k_ols'],
        sig2n_k_tsls=config['sig2n_k_2sls'],
        sig2n_k_gmlag=config['sig2n_k_gmlag'],
        max_iter=config['gmm_max_iter'],
        stop_crit=config['gmm_epsilon'],
        inf_lambda=config['gmm_inferenceOnLambda'],
        comp_inverse=config['gmm_inv_method'],
        step1c=config['gmm_step1c'],
        instrument_lags=config['instruments_w_lags'],
        lag_user_inst=config['instruments_lag_q'],
        vc_matrix=config['output_vm_summary'],
        predy_resid=predy_resid,
        ols_diag=config['other_ols_diagnostics'],
        moran=config['other_residualMoran'],
        white_test=config['white_test'],
        regime_err_sep=config['regimes_regime_error'],
        regime_lag_sep=config['regimes_regime_lag'],
        cores=config['other_numcores'],
        ml_epsilon=config['ml_epsilon'],
        ml_method=config['ml_method'],
        ml_diag=config['ml_diagnostics'],
        SUR_Spatdiagnostics=config['SURSpatdiagnostics'],
        SUR_NonSpatdiagnostics=config['SURNonSpatdiagnostics'],
        SUR_UseIterEst=config['SURUseIterEst'],
        method=method).output
    print results
    # NOTE(review): this loop rebinds `r` (previously the regimes column).
    for r in results:
        path.write(r.summary)
        path.write('\n\n\n')
    return [r.summary for r in results]
# NOTE(review): stray opening triple-quote below — it begins a region that
# is truncated in this chunk; left byte-identical.
"""
def write_tensor(x, path):
    """Write every element of the 3-D tensor *x* to *path*.

    Elements are emitted in row-major order, one per line, formatted as
    '%.10f ' followed by a newline.
    """
    fmt = '%.10f \n'
    for plane in x:
        for row in plane:
            for value in row:
                path.write(fmt % value.item())
def existingRunner(tmpdir, name):
    """Create a shell-stub script named *name* and wrap it in a Runner.

    Returns a (path-string, Runner) pair.
    """
    script = tmpdir.join(name)
    script.write("#!/bin/sh")
    location = str(script)
    return location, Runner(name, location)
def _replace(self, path, what, to): with open(path, 'r') as p: data = p.read() data = data.replace(what, to) with open(path, 'w') as p: p.write(data)
def savePlaylistToFile(playlist, path):
    """Write *playlist* to *path*, creating missing parent directories.

    Args:
        playlist: text to write (file is truncated/overwritten).
        path: pathlib.Path of the destination file.

    Fix: the original rebound the *path* parameter to a raw file handle
    opened without a context manager, leaking the handle if write() raised;
    Path.write_text opens, writes and closes atomically from the caller's
    point of view.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(playlist)