def execute(self):
    if not self.empty_flag:
        try:
            stdout_file = open("data_products/stdout.txt", "a")
            stderr_file = open("data_products/stderr.txt", "a")
            for cmd in self.command:
                if not isinstance(cmd, list):
                    cmd = cmd.split()
                proc = Popen(cmd, stdout=stdout_file, stderr=stderr_file)
                proc.communicate()
                stdout_file.flush()
            stdout_file.close()
            stderr_file.close()
            # Put a copy of the parameter file for the job into the
            # data_products folder.
            os.system("cp /home/ubuntu/data/params.txt "
                      "/home/ubuntu/data_products/")
            # Check the files in the output folder
            self.output_files = listdir_fullpath("/home/ubuntu/data_products/")
            if len(self.output_files) == 0:
                raise Exception("No output files found.")
            self.message_dict['execute'] = "Successfully executed command."
        except Exception:
            self.success = False
            self.message_dict['execute'] = traceback.format_exc()
            self.output_files = []
            # Make sure the log files are closed even on failure.
            try:
                stdout_file.close()
                stderr_file.close()
            except Exception:
                pass
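# execute() above references several attributes (self.command, self.empty_flag,
# self.success, self.message_dict, self.output_files) that must be defined on
# the surrounding class. A minimal sketch of that assumed context; the class
# name JobRunner is hypothetical, not from the source:
import os
import traceback
from subprocess import Popen


class JobRunner:  # hypothetical container for the execute() method above
    def __init__(self, command, empty_flag=False):
        self.command = command        # commands to run (strings or argv lists)
        self.empty_flag = empty_flag  # when True, execute() is a no-op
        self.success = True           # flipped to False on failure
        self.message_dict = {}        # per-step status messages
        self.output_files = []        # filled in by execute()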
def luchizz_shell():
    """Customize the bash prompt and behavior for a more stylish experience."""
    # Load the luchizz bashrc script
    global LUCHIZZ_DIR
    luchizz_profile = os.path.join(LUCHIZZ_DIR,
                                   'files/profile/luchizz-profile.sh')
    with open(luchizz_profile, 'r') as f:
        luchizz_profile = f.read()
    # Installing default bash changes for newly created users
    # FIXME: what the hell is this folder used for?
    # Currently this causes issues when connecting to localhost; debugging needed.
    # New users seem to rely only on /etc/skel/.bashrc
    # ~files = put('./files/profile/*', '/etc/profile.d/', use_sudo=True)
    # ~for f in files:
    # ~    sudo('chown root: {}'.format(f))
    # ~    sudo('chmod 644 {}'.format(f))
    # Update the skel file
    if not contains('/etc/skel/.bashrc', 'luchizz'):
        append('/etc/skel/.bashrc', luchizz_profile, use_sudo=True)
    # Set a huge history for newly created users
    sed('/etc/skel/.bashrc', 'HISTSIZE=.*', 'HISTSIZE=1000000',
        use_sudo=True)
    sed('/etc/skel/.bashrc', 'HISTFILESIZE=.*', 'HISTFILESIZE=1000000',
        use_sudo=True)
    # Append bash changes to the homes of current users and root
    homes = utils.listdir_fullpath('/home')
    homes.append('/root')
    for u in homes:
        bashrc_file = os.path.join(u, '.bashrc')
        if not exists(bashrc_file, use_sudo=True):
            continue
        sed(bashrc_file, 'HISTSIZE=.*', 'HISTSIZE=1000000', use_sudo=True)
        sed(bashrc_file, 'HISTFILESIZE=.*', 'HISTFILESIZE=1000000',
            use_sudo=True)
        if not contains(bashrc_file, 'luchizz'):
            append(bashrc_file, luchizz_profile, use_sudo=True)
    # Alternate mappings for "page up" and "page down" to search the history:
    # uncomment the following lines in /etc/inputrc
    # "\e[5~": history-search-backward
    # "\e[6~": history-search-forward
    uncomment('/etc/inputrc', 'history-search-forward', use_sudo=True)
    uncomment('/etc/inputrc', 'history-search-backward', use_sudo=True)
    # Enable vim syntax highlighting
    uncomment('/etc/vim/vimrc', 'syntax on', char='"', use_sudo=True)
def test_listdir_fullpath(self):
    """
    Ensures the full path of every file or folder in a given directory
    is returned when the `listdir_fullpath()` function is called.
    """
    fake_filepaths = []
    min_value = 0
    max_value = 10
    iterations = fake.pyint(min_value=min_value, max_value=max_value)
    root = self.test_folder
    for iteration in range(iterations):
        # Use unique words so os.makedirs() never hits an existing path.
        filename = fake.unique.word()
        fake_filepath = os.path.join(root, filename)
        fake_filepaths.append(fake_filepath)
        os.makedirs(fake_filepath)
    test_filepaths = utils.listdir_fullpath(directory=root)
    self.assertEqual(sorted(test_filepaths), sorted(fake_filepaths))
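# Every snippet in this section calls a small listdir_fullpath() helper
# (sometimes via utils.listdir_fullpath). A minimal sketch of what the test
# above implies it does -- join the directory with each entry name -- which
# is an assumption about the implementation, not the project's actual code:
import os


def listdir_fullpath(directory):
    """Return the full path of every entry in `directory`."""
    return [os.path.join(directory, entry) for entry in os.listdir(directory)]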
def LoadData(data_path, max_classes=4, max_length=40):
    '''Load data from the txt files in all subfolders of the given data_path.

    max_length is the maximum accelerometer length of each data sample.
    '''
    data, labels = [], []
    subfolder_list = os.listdir(data_path)
    assert len(subfolder_list) > 0, "Contains no subfolders"
    for ix, subfolder in enumerate(subfolder_list):
        subfolder_path = os.path.join(data_path, subfolder)
        files_list = utils.listdir_fullpath(subfolder_path)
        print("Got {} files in {}".format(len(files_list), subfolder_path))
        labels_identity = np.eye(max_classes)
        for idx, filename in enumerate(files_list):
            # label comes from the file name (e.g. "0.txt" -> class 0)
            label = int(os.path.splitext(os.path.basename(filename))[0])
            with open(filename, 'r') as f:
                content = f.readlines()
            data_trial = []
            num_trial = 0
            for row in content:
                # trials are separated by "\n"
                if row == "\n":
                    # make all trial lengths the same size
                    if label == 0:
                        # collect the first 40 rows for pickup
                        data.append(
                            list(itertools.chain(*data_trial[:max_length])))
                    else:
                        # collect the last 40 rows for all other classes
                        data.append(
                            list(itertools.chain(*data_trial[-max_length:])))
                    # print("data trial len = {}".format(len(data_trial)))
                    labels.append(labels_identity[label])
                    data_trial = []
                    num_trial += 1
                else:
                    row_list = row.strip('\n').split('\t')
                    data_trial.append(row_list)
            # the last trial: make its length the same size as well
            if label == 0:
                # collect the first 40 rows for pickup
                data.append(list(itertools.chain(*data_trial[:max_length])))
            else:
                data.append(list(itertools.chain(*data_trial[-max_length:])))
            labels.append(labels_identity[label])
            print("loaded {} trials from {}".format(num_trial + 1, filename))
    print("\nTotal trials = {}, Min trial length = {}, Max trial length = {}"
          .format(len(labels),
                  min(utils.convert_len_2Dlist(data)) / 3,
                  max(utils.convert_len_2Dlist(data)) / 3))
    X = np.array(data, dtype=np.float32)
    y = np.array(labels, dtype=np.float32)
    return X, y
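# A short usage sketch for LoadData. The data_path and split parameters below
# are illustrative assumptions, not values from the source:
from sklearn.model_selection import train_test_split

X, y = LoadData("data/accelerometer", max_classes=4, max_length=40)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)
print(X_train.shape, y_train.shape)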
print("finished writing file {}".format(i), end='\r') if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-n_jobs", type=int, required=True) parser.add_argument("-num_summaries", type=int, required=True) parser.add_argument("-in_dir", type=str, required=True) parser.add_argument("-out_dir", type=str, required=True) args = parser.parse_args() assert os.path.isdir(args.in_dir) assert os.path.isdir(args.out_dir) # Read summary files in parallel input_fnames = listdir_fullpath(args.in_dir) with parallel_backend('multiprocessing', n_jobs=args.n_jobs): all_summaries = Parallel()(delayed(read_out_file)(idx, fname) for idx, fname in enumerate(input_fnames)) # sort summaries according to document number all_summaries = sorted(all_summaries, key=lambda x: x[0]) all_summaries = [tup[1] for tup in all_summaries] with parallel_backend('multiprocessing', n_jobs=args.n_jobs): unique_summaries = Parallel()( delayed(remove_duplicates)(idx, summaries) for idx, summaries in enumerate(all_summaries)) output_fnames = [ args.out_dir + "/out_{}.txt".format(i) for i in range(args.num_summaries) ]
parser.add_argument('--dataset', help='Path to fixations in .mat files',
                    required=True)
parser.add_argument('--output', help='Path to folder where maps should be stored',
                    required=True)
args = parser.parse_args()

print("loading .mat files from: " + args.dataset)
extracted_data = list(
    map(
        lambda x: {
            'file': x.rsplit('/', 1)[1].replace(".mat", ".fix.png"),
            **get_size_and_fix(loadmat(x)['s'])
        },
        listdir_fullpath(args.dataset)))

for data in extracted_data:
    print("working on file: " + data['file'])
    final_map = np.zeros(data['size'])
    for i in range(1, 6):
        user_key = 'user_' + str(i)
        user_fix = data[user_key]
        final_map = apply_fixations(data['size'], user_fix, final_map)
    print(final_map.shape)
    final_map = GaussianBlur(final_map, (95, 95), 0)
    toimage(final_map.T).save(os.path.join(args.output, data['file']))
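# get_size_and_fix() and apply_fixations() are not shown in this snippet.
# A plausible sketch of apply_fixations, assuming each user's fixations are
# (x, y) coordinate pairs accumulated into the map before the Gaussian blur;
# the body and the coordinate order are assumptions, not the original code:
def apply_fixations(size, fixations, fixation_map):
    # Mark each fixation point on the map, clamping to the image bounds.
    for x, y in fixations:
        row = min(int(y), size[0] - 1)
        col = min(int(x), size[1] - 1)
        fixation_map[row, col] = 1
    return fixation_map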