def setUpModule():
    """Fetch the three Ogg/Vorbis test fixtures once per test run.

    Each fixture is downloaded only when it is missing from disk; the matching
    ``*_was_downloaded`` module flag records the fetch (presumably so a
    teardown can remove downloaded files — TODO confirm against tearDownModule).
    The previously triplicated download code is factored into _download_file.
    """
    global TEST_FILE_1_PATH, TEST_FILE_NOT_OGG_PATH, TEST_FILE_NOT_VORBIS_PATH
    if not os_path_exists(TEST_FILE_1_PATH):
        global test_file_1_was_downloaded, TEST_FILE_1_URL
        test_file_1_was_downloaded = True
        _download_file(TEST_FILE_1_URL, TEST_FILE_1_PATH)
    if not os_path_exists(TEST_FILE_NOT_OGG_PATH):
        global test_file_not_ogg_was_downloaded, TEST_FILE_NOT_OGG_URL
        test_file_not_ogg_was_downloaded = True
        _download_file(TEST_FILE_NOT_OGG_URL, TEST_FILE_NOT_OGG_PATH)
    if not os_path_exists(TEST_FILE_NOT_VORBIS_PATH):
        global test_file_not_vorbis_was_downloaded, TEST_FILE_NOT_VORBIS_URL
        test_file_not_vorbis_was_downloaded = True
        _download_file(TEST_FILE_NOT_VORBIS_URL, TEST_FILE_NOT_VORBIS_PATH)


def _download_file(url, dest_path):
    """Stream *url* to *dest_path*, closing both endpoints on completion."""
    with urlopen(url) as response, open(dest_path, 'wb') as out_file:
        shutil_copyfileobj(response, out_file)
def makedir(self):
    """Ensure the data directory, save file and (fresh) log file exist."""
    if not os_path_exists(self.dir_path):
        os_makedirs(self.dir_path)
        log_info("Made directory at: {0}".format(self.dir_path))
    if not os_path_exists(self.file_path):
        # First run for this file: seed the default save file and start a new log.
        self.save_file = self._default_save_file()
        make_log_file(self.log_path, mode="w")
def makedir(self):
    """Create the working directory, default save file and log file on demand."""
    dir_missing = not os_path_exists(self.dir_path)
    if dir_missing:
        os_makedirs(self.dir_path)
        log_info("Made directory at: {0}".format(self.dir_path))
    file_missing = not os_path_exists(self.file_path)
    if file_missing:
        # No data file yet: initialise defaults and open a fresh log.
        self.save_file = self._default_save_file()
        make_log_file(self.log_path, mode="w")
def tearDown(self) -> None:
    """Remove the staged-changes file and both temp paths left by a test."""
    for leftover in (self._changes_codec._STAGED_PATH, _TEMP_PATH_1, _TEMP_PATH_2):
        if os_path_exists(leftover):
            os_remove(leftover)
def _observe_dir_path(self, change):
    """Relocate the managed directory when its path trait changes.

    On any non-create change, if the data file has not yet been written and
    the old directory exists, the whole directory is moved to the new
    location and the log file is re-created there for appended logging.
    """
    if change['type'] == 'create':
        return
    previous_dir = change['oldvalue']
    if os_path_exists(self.file_path):
        return
    if os_path_exists(previous_dir):
        remove_log_file()
        move(previous_dir, self.dir_path)
        make_log_file(self.log_path)
        log_info("Moved files to: {0}".format(self.dir_path))
def _observe_dir_path(self, change):
    """React to a directory-path change by migrating existing files.

    Skips 'create' events. When the data file is not yet written and the
    previous directory is present, everything is moved to the new path and
    logging is re-attached there.
    """
    if change['type'] != 'create':
        source_dir = change['oldvalue']
        file_already_written = os_path_exists(self.file_path)
        if not file_already_written and os_path_exists(source_dir):
            remove_log_file()
            move(source_dir, self.dir_path)
            make_log_file(self.log_path)
            log_info("Moved files to: {0}".format(self.dir_path))
def merge_posting_pairs(self, posting_files, posting_names):
    """Merge consecutive pairs of posting files and purge their disk copies.

    :param posting_files: list of posting files to be pair-wise merged
    :param posting_names: names whose on-disk ``<name>.pkl`` files are deleted
    :return: new list of merged posting files (an odd trailing file is kept as-is)
    """
    merged_postings = []
    for first in range(0, len(posting_files), 2):
        second = first + 1
        if second < len(posting_files):
            merged_postings.append(
                self.merge_postings(posting_files[first], posting_files[second]))
        else:
            # Odd count: the last file has no partner and passes through unchanged.
            merged_postings.append(posting_files[first])
    # The merged results supersede the individual files on disk.
    for posting in posting_names:
        pkl_path = self.output_path + "{}".format(posting) + ".pkl"
        if os_path_exists(pkl_path):
            os_remove(pkl_path)
    return merged_postings
def make_cmd_args(url_page, options, streaming=False):
    """Build the rtmpdump argument string for downloading or streaming a video.

    Python 2 module (uses print statements). Exits the process when rtmpdump
    is not on PATH. When not streaming, the output filename is derived from
    the page URL and '--resume' is added if a partial file already exists.
    Returns the argument string (without the 'rtmpdump' executable itself).
    """
    if not find_in_path(os_environ['PATH'], 'rtmpdump'):
        print >> stderr, 'Error: rtmpdump has not been found'
        exit(1)
    video_url, player_url = get_rtmp_url(url_page, quality=options.quality,
                                         lang=options.lang)
    output_file = None
    if not streaming:
        # Local filename: last path segment of the page URL, '.html' -> '.flv'
        output_file = urlparse(url_page).path.split('/')[-1].replace('.html',
                                                                     '.flv')
        cmd_args = '-r %s --swfVfy %s --flv %s' % (video_url, player_url,
                                                   output_file)
    else:
        cmd_args = '-r %s --swfVfy %s' % (video_url, player_url)
    if not options.verbose:
        cmd_args += ' --quiet'
    if not streaming:
        if os_path_exists(output_file):
            # try to resume a download
            cmd_args += ' --resume'
            print ':: Resuming download of %s' % output_file
        else:
            print ':: Downloading to %s' % output_file
    else:
        print ':: Streaming from %s' % video_url
    return cmd_args
def test_does_not_exist(self):
    """cvs_add on a missing path reports 'does not exist' and stages nothing."""
    status_and_path: Tuple[str, str] = self._changes_codec.cvs_add(_TEMP_PATH_1)
    self.assertEqual(status_and_path, ('does not exist', _TEMP_PATH_1))
    self.assertFalse(os_path_exists(self._changes_codec._STAGED_PATH))
def _storeArticle(article):
    """Persist *article* (dict) as JSON under BASE_PATH and log its link.

    The storage path mirrors the article URL's path components; the last
    component becomes the file name. The article link is appended to the
    'article_log' file. Returns True on success.

    param article: dict with at least a 'link' key
    """
    # Derive directory components from the article's URL
    path = re_sub('http://www.spiegel.de/', '', article['link']).split('/')
    filename = path.pop(-1)
    storePath = os_path_join(BASE_PATH, *path)

    # Create directories if needed
    if not os_path_exists(storePath):
        os_makedirs(storePath)

    # Write article as JSON to the file
    with open(os_path_join(storePath, filename), 'w') as o:
        json.dump(article, o)

    # Append the link to the log. Mode 'a' creates the file when missing, so
    # the old isfile()/'w' fallback was redundant; the context manager also
    # closes the handle even if write() raises (the original leaked it).
    with open(BASE_PATH + 'article_log', 'a') as log:
        log.write(article['link'] + '\n')
    return True
def __init__(self):
    """Load (or initialise) the password store at ~/.ron_password_manager.

    On first run a random secret and an empty password map are created and
    persisted; otherwise both are loaded from the JSON state file. A
    VigenereCipher keyed on the secret is prepared for later use.
    """
    # Character pools for password generation. The old
    # `",".join(chars).split(",")` dance was just a fragile list(chars).
    self.allowed_characters = {
        "alphabets_lower": list(string_ascii_lowercase),
        "alphabets_upper": list(string_ascii_uppercase),
        "numbers": [str(number) for number in range(10)],
        # ',' is deliberately excluded from the symbol pool.
        "symbols": ["!", "@", "#", "$", "%", "&", "*", "-", "_", "+", "="],
    }
    self.file_name = os_path_join(os_path_expanduser("~"),
                                  ".ron_password_manager")
    if not os_path_exists(self.file_name):
        # First run: generate a fresh secret and persist the empty store.
        self.secret = self.generate_random_password()
        self.passwords = {}
        self.save_state()
    else:
        with open(self.file_name, "r") as json_file:
            json_output = json_load(json_file)
        self.secret = json_output.get("secret")
        self.passwords = json_output.get("passwords")
    self.vigenere = VigenereCipher(secret=self.secret)
def make_cmd_args(url_page, options, streaming=False):
    """Build the rtmpdump argument string for a download or a live stream.

    Python 2 module (uses print statements). Aborts the process when the
    rtmpdump binary is not found on PATH. Download mode derives the output
    filename from the page URL and resumes partially-downloaded files.
    Returns the argument string only (executable name not included).
    """
    if not find_in_path(os_environ['PATH'], 'rtmpdump'):
        print >> stderr, 'Error: rtmpdump has not been found'
        exit(1)
    video_url, player_url = get_rtmp_url(url_page, quality=options.quality,
                                         lang=options.lang)
    output_file = None
    if not streaming:
        # Local filename: last URL path segment with '.html' -> '.flv'
        output_file = urlparse(url_page).path.split('/')[-1].replace(
            '.html', '.flv')
        cmd_args = '-r %s --swfVfy %s --flv %s' % (video_url, player_url,
                                                   output_file)
    else:
        cmd_args = '-r %s --swfVfy %s' % (video_url, player_url)
    if not options.verbose:
        cmd_args += ' --quiet'
    if not streaming:
        if os_path_exists(output_file):
            # try to resume a download
            cmd_args += ' --resume'
            print ':: Resuming download of %s' % output_file
        else:
            print ':: Downloading to %s' % output_file
    else:
        print ':: Streaming from %s' % video_url
    return cmd_args
def setup_class(self):
    """Snapshot a pre-existing ~/.list_manager file so tests can restore it."""
    self.file_name = os_path_join(os_path_expanduser("~"), ".list_manager")
    self.requires_cleanup = os_path_exists(self.file_name)
    if self.requires_cleanup:
        # Keep the user's real data so teardown can put it back.
        with open(self.file_name, "r") as json_input_file:
            self.stored_lists = json_load(json_input_file)
def __init__(self):
    """Load persisted lists from ~/.list_manager, or start with none."""
    self.file_name = os_path_join(os_path_expanduser("~"), ".list_manager")
    if os_path_exists(self.file_name):
        with open(self.file_name, "r") as json_input_file:
            self.lists = json_load(json_input_file).get("lists")
    else:
        self.lists = {}
def save_data_to_json_file(data, jsonFilePath, indentation=None):
    """Write *data* to *jsonFilePath* as pretty-printed, sorted UTF-8 JSON.

    Creates the file's parent directory when missing. When *indentation* is
    given it is printed as a progress prefix before the status message.

    Fixes vs. original: (1) os_mkdir was called on the FILE path itself,
    creating a directory where the file should go and breaking the open();
    it now creates the parent directory. (2) the status message formatted
    `indentation` into the single '{}' placeholder instead of the path.
    """
    from os.path import dirname  # local import keeps the fix self-contained
    parent_dir = dirname(jsonFilePath)
    if parent_dir and not os_path_exists(parent_dir):
        os_mkdir(parent_dir)
    if (indentation):
        print("\n{}".format(indentation))
        print("Saving data to '{}'... ".format(jsonFilePath), end="")
    with open(jsonFilePath, "w", encoding="utf-8") as f:
        json_dump(data, f, ensure_ascii=False, sort_keys=True, indent=4)
    print("Done. \n")
def test_checkout_head(rc: ServerRepoCtx):
    """Reset index and worktree to the tip of refs/heads/master."""
    # FIXME: only touches paths recorded in index - stray worktree files not removed etc
    #   see IndexFile.checkout vs head.checkout
    # FIXME: use force=True ?
    # index: git.IndexFile = git.IndexFile.from_tree(rc.repo, master.commit.tree)
    # index.checkout()
    master_ref: git.Reference = git.Reference(rc.repo, "refs/heads/master")
    assert os_path_exists(str(rc.repodir / '.git'))
    rc.repo.head.reset(master_ref.commit, index=True, working_tree=True)
def load_dataset_statistics(self, json_path):
    """Load dataset statistics from *json_path*.

    Returns the parsed JSON object, or None (after logging a warning) when
    the file does not exist. The dead local `save_statistics` from the
    original was removed and the missing-file case became a guard clause.
    """
    if not os_path_exists(json_path):
        logging.warning(f"WARNING! {json_path} not found!")
        return None
    with open(json_path) as json_file:
        return json.load(json_file)
def save_code(self, obj):
    """Copy the .py source file that defines *obj* into the run directory as a .pyb backup."""
    if obj is None:
        return
    module_path, _ext = os_path_splitext(getfile(obj))
    # Use the .py file, not the .pyc, if compiled.
    source_path = module_path + '.py'
    backup_path = self.dir_path + self.divider + os_path_split(module_path)[1] + ".pyb"
    if not os_path_exists(backup_path):
        copyfile(source_path, backup_path)
        log_info("Saved code to: {0}".format(backup_path))
def save_code(self, obj):
    """Back up the source module of *obj* into this run's directory (as .pyb)."""
    if obj is None:
        return
    stem, _extension = os_path_splitext(getfile(obj))
    py_source = stem + '.py'  # the .py file, not the .pyc, if compiled
    destination = "".join(
        [self.dir_path, self.divider, os_path_split(stem)[1], ".pyb"])
    if os_path_exists(destination):
        return
    copyfile(py_source, destination)
    log_info("Saved code to: {0}".format(destination))
def saver(path, content=True, txt=False):
    """Persist *content* (bytes) or *txt* (str) to *path*, creating the parent dir.

    Writes binary when *content* is truthy, otherwise text (UTF-8) when *txt*
    is truthy. Fix vs. original: the parent directory was computed with
    ``''.join(...)``, which dropped every '/' separator and so never matched
    (nor created) the real parent — the subsequent open() then failed.
    """
    dir_path = '/'.join(path.split('/')[:-1])
    if dir_path and not os_path_exists(dir_path):
        os_mkdir(dir_path)
    if content:
        with open(path, 'wb') as f:
            f.write(content)
    elif (txt):
        with open(path, 'w', encoding='utf-8') as f:
            f.write(txt)
def setup_method(self):
    """Start every test from a deleted state file and a known set of lists."""
    if os_path_exists(self.file_name):
        os_remove(self.file_name)
    self.list_manager = ListManager()
    # Seed a known baseline and persist it for the test to read back.
    self.initial_lists = {
        "list_a": ["item_a", "item_b"],
        "list_b": "item_c",
    }
    self.list_manager.lists = self.initial_lists
    self.list_manager.export_list_to_file()
def loadDefaultConfig():
    """Load the default settings JSON, creating it first when absent.

    Guarantees the returned config contains a DATA_DIR key, persisting the
    amended config back to disk when the key had to be added.
    """
    if not os_path_exists(DEFAULT_SETTINGS_JSON):
        initDefaultConfig()
    with open(DEFAULT_SETTINGS_JSON, 'r', encoding='utf-8') as f:
        conf = json_load(f)
    if "DATA_DIR" not in conf:
        conf["DATA_DIR"] = DATA_DIR
        with open(DEFAULT_SETTINGS_JSON, 'w+', encoding='utf-8') as f:
            json_dump(conf, f)
    return conf
def cvs_init(self):
    """Create the edu-cvs repository folder in the current working directory.

    Does nothing when the folder already exists. Raises OSError (prefixed
    with an 'init:' message) when creation fails.
    """
    try:
        if not os_path_exists(self._CVS_DIR_PATH):
            os_mkdir(self._CVS_DIR_PATH)
    except OSError as mkdir_err:
        raise OSError('init: cannot create folder', *mkdir_err.args)
def work(self):
    """Rebuild the profiles directory and generate one profile per student."""
    # Start from a clean output directory every run.
    if os_path_exists(self.profilesDir):
        shutil_rmtree(self.profilesDir)
    os_makedirs(self.profilesDir)
    student_infos = self.getStuInfo(self.infoPath)
    dates, student_datas = self.getDatasFromDing(self.excelsDir, student_infos)
    self.generateEveryoneProfile(self.profilesDir, student_infos, dates,
                                 student_datas)
    self._finished.emit()
def test_not_a_file(self):
    """cvs_add on a directory reports 'not a file' and stages nothing."""
    os_mkdir(_TEMP_PATH_1)
    result: Tuple[str, str] = self._changes_codec.cvs_add(_TEMP_PATH_1)
    self.assertEqual(result, ('not a file', _TEMP_PATH_1))
    self.assertFalse(os_path_exists(self._changes_codec._STAGED_PATH))
    shutil_rmtree(_TEMP_PATH_1)
def makedir(self, name):
    """Create (if needed) and return the per-author directory for *name*.

    Threads whose name mentions a known author go under that author's
    folder; everything else goes under 'Others'. Note: when several authors
    match, the last matching author's path wins (original behaviour kept).
    """
    # name.replace(' ', '').replace('.', 'ß')
    matched = False
    for author in self.AUTHORS:
        if author in name:
            path = "./img-dl-threads/Gitograms/" + author + "/" + name
            if not os_path_exists(path):
                os_mkdir(path)
            matched = True
    if not matched:
        path = "./img-dl-threads/Gitograms/Others/" + name
        os_mkdir(path)
    return path
def fetchFinished(self, x, webcamid):
    """Callback run when the thumbnail download for *webcamid* finishes.

    Python 2 / enigma2 code. Starts an ePicLoad decode of the downloaded
    /tmp/<id>_thumb.jpg scaled to the 'thumbnail' widget's size.
    """
    print "fetchFinished", x, webcamid
    self.pixmaps_to_load.remove(webcamid)
    sc = AVSwitch().getFramebufferScale()
    if (os_path_exists("/tmp/"+str(webcamid)+"_thumb.jpg") == True):
        self.picloads[webcamid] = ePicLoad()
        # finish_decode will be invoked with this webcamid once decoding completes
        self.picloads[webcamid].PictureData.get().append(
            boundFunction(self.finish_decode, webcamid))
        self.picloads[webcamid].setPara((
            self["thumbnail"].instance.size().width(),
            self["thumbnail"].instance.size().height(),
            sc[0], sc[1], False, 1, "#00000000"))
        self.picloads[webcamid].startDecode("/tmp/"+str(webcamid)+"_thumb.jpg")
    else:
        # NOTE(review): `thumbnailFile` is not defined anywhere in this scope,
        # so this branch raises NameError instead of printing — presumably the
        # /tmp path above was intended. Confirm before fixing.
        print "[decodePic] Thumbnail file NOT FOUND !!!-->:", thumbnailFile
def _open(self):
    """Open a DB connection for every DSN in self._hub and run prepare scripts.

    For each (key, dsn) pair: import the driver module named by the DSN
    scheme, connect with scheme-specific arguments, invoke the optional
    callback, replace the hub entry with the live connection, and execute
    any ';'-separated SQL statements from the prepare file registered for
    that key. All failures are reported via warnings.warn and skipped.
    """
    for k, dsn in self._hub.items():
        v = self._kwargs(dsn)  # v[0]: driver/module name, v[1]: connect kwargs
        if not v[0]:
            continue
        try:
            driver = __import__(v[0])
        except ImportError as e:
            warnings.warn(repr(e))
        else:
            try:
                if 'sqlite3' == v[0]:
                    connect = driver.connect(dsn[11:])  # substr sqlite3:///
                elif 'postgresql' == v[0]:
                    connect = driver.open(dsn.replace('postgresql', 'pq'))  # py-postgresql open
                elif 'psycopg2' == v[0]:
                    # psycopg2 takes a 'key=value key=value' DSN string
                    connect = driver.connect(' '.join([
                        '='.join(l) for l in v[1].items()]))
                else:
                    connect = driver.connect(v[1])
            except driver.OperationalError as e:
                warnings.warn(repr(e))
            else:
                if self._callback:
                    self._callback(connect)
                self._hub[k] = connect
                if self._prepare:
                    file_ = self._prepare.get(k, None)
                    if file_:
                        if os_path_exists(file_) and os_access(file_, os_R_OK):
                            try:
                                f = open(file_, 'r')
                            except IOError as e:
                                warnings.warn(repr(e))
                            else:
                                # naive split: statements must not contain ';'
                                sqls = f.read().split(';')
                                try:
                                    cursor = connect.cursor()
                                except driver.Error as e:
                                    warnings.warn(repr(e))
                                else:
                                    for sql in sqls:
                                        try:
                                            # NOTE(review): executes on the
                                            # connection, not the cursor just
                                            # created — confirm the driver
                                            # supports connection.execute.
                                            connect.execute(sql)
                                        except driver.Error as e:
                                            warnings.warn(repr(e))
                                finally:
                                    # NOTE(review): if cursor() raised, `cursor`
                                    # is unbound here and this raises NameError.
                                    cursor.close()
                            finally:
                                # NOTE(review): same hazard — `f` is unbound
                                # when open() failed with IOError.
                                f.close()
def cvs_add(self, input_path: str) -> Tuple[str, str]:
    """Stage a single file for the next commit.

    Returns a (status, path) pair; status is one of 'does not exist',
    'not a file', 'already added' or 'success'. Raises CodecException when
    the repository has not been initialised.
    """
    if not os_path_exists(self._CVS_DIR_PATH):
        raise CodecException('add: init repo first')
    if not os_path_exists(input_path):
        return 'does not exist', input_path
    if not os_path_isfile(input_path):
        return 'not a file', input_path
    # Load the current staged-changes record, or start a fresh one.
    if os_path_exists(self._STAGED_PATH):
        staged: 'ChangesCodec.RepositoryChanges' = \
            self._decode_changes(self._STAGED_PATH)
    else:
        staged = self.RepositoryChanges()
    if input_path in staged.addition:
        return 'already added', input_path
    staged.addition.append(input_path)
    self._encode_changes(staged, self._STAGED_PATH)
    return 'success', input_path
def find_idx(dpath=None, ftype='.jpg'):
    """Return the next free numeric file index in *dpath*.

    Scans *dpath* for files named '<number><ftype>' and returns one past the
    highest number found. Creates *dpath* and returns 0 when it does not
    exist. *ftype* (default '.jpg', so existing callers are unaffected)
    generalises the scan beyond JPEGs, matching the sibling implementation.
    """
    if not dpath:
        dpath = 'data/'
    if not os_path_exists(dpath):
        os_mkdir(dpath)
        # brand-new directory: numbering starts at zero
        return 0
    prefix_len = len(dpath)
    suffix_len = len(ftype)
    # Compare indices numerically: the old lexicographic max() picked
    # '9.jpg' over '10.jpg', producing duplicate indices past 9.
    indices = [int(name[prefix_len:-suffix_len])
               for name in glob(dpath + '*' + ftype)]
    return max(indices) + 1 if indices else 0
def find_idx(dpath=None, ftype='.jpg'):
    """
    Scans data directory for last file number and returns next index.
    Creates data directory and returns index 0 if none exists.

    Fixes vs. original: the scan inside max() hard-coded '*.jpg' while the
    guard used *ftype*, so any other extension crashed with ValueError
    (max() of an empty glob); indices are now also compared numerically
    instead of lexicographically ('9' no longer beats '10').
    """
    if not dpath:
        dpath = 'data/'
    if not os_path_exists(dpath):
        os_mkdir(dpath)
        # if new directory start new index
        return 0
    pl = len(dpath)   # path/prefix length
    sl = len(ftype)   # suffix/extension length
    matches = glob(dpath + '*' + ftype)
    return 1 + max(int(name[pl:-sl]) for name in matches) if matches else 0
def init_directory(bad_urls_dir, tei_logger):
    """Resolve, create and sanity-check the bad-URLs directory.

    The path is made absolute (relative paths resolve against the current
    working directory), created when missing (a warning is logged when it
    already exists), and validated to be a directory (the process exits
    otherwise). A warning is logged when the directory is not empty.
    Returns the resolved absolute path.
    """
    resolved = bad_urls_dir if os_path_isabs(bad_urls_dir) \
        else os_path_join(os_path_abspath(getcwd()), bad_urls_dir)
    if not os_path_exists(resolved):
        makedirs(resolved, exist_ok=True)
    else:
        tei_logger.log('WARNING', f'{resolved} exists!')
    if not os_path_isdir(resolved):
        tei_logger.log('CRITICAL', f'{resolved} is not a directory!')
        exit(1)
    if len(listdir(resolved)) > 0:
        tei_logger.log('WARNING', f'{resolved} is not empty!')
    return resolved
def update_db(self):
    """Apply pending SQL update scripts from DB_DIR/Updates.

    Update files are named by version (e.g. 'v0.0.0.sql'); only regular
    files whose parsed version passes self.version_is_usable are applied,
    in ascending version order. Raises Exception when a script fails.
    """
    DB_update_folder = DB_DIR+"/Updates";
    if(not os_path_exists(DB_update_folder)):
        return;

    # Get all files that match versioning
    file_versions = [];
    for file in os_listdir(DB_update_folder):
        filepath = os_path_join(DB_update_folder, file);
        # Try to get the file's name excluding extension (valid filename example: v0.0.0.sql)
        version_string = Version.version_string(
            os_path_splitext(os_path_basename(filepath))[0]);
        # Include only files with proper version names within update range
        if(os_path_isfile(filepath) and version_string
           and self.version_is_usable(Version(version_string))):
            file_versions.append({"path": filepath,
                                  "version": Version(version_string)});

    # Apply updates oldest-first
    file_versions.sort(key=lambda file_version : file_version["version"]);
    for file in file_versions:
        # NOTE(review): '<' passed as a list element reaches mysql as a literal
        # argument, not a shell redirection — this only works if
        # call_shell_command joins the list and runs it through a shell;
        # confirm, otherwise the script file is never read.
        if(self.call_shell_command(["sudo", "mysql", "-u", "root", "<",
                                    file["path"]])):
            raise Exception(f"Failed to update DB with file {file['path']}");
def updateSettings():
    """Refresh module-level option globals from the user (or default) config.

    Loads the user settings JSON when present, the defaults otherwise, then
    overwrites each global only for keys present in the loaded config.
    Returns the loaded config dict.
    """
    global MAP_TYPE, FUNC_TYPE, START_ROW, ONLY_FIRST_SHEET, SHOW_DISTANCE, \
        DATA_DIR, BROWSER_PATH
    if os_path_exists(USER_SETTINGS_JSON):
        config = loadUserConfig()
        log.debug('load User Config')
    else:
        config = loadDefaultConfig()
        log.debug('load Default Config')
    MAP_TYPE = config.get('MAP_TYPE', MAP_TYPE)
    FUNC_TYPE = config.get('FUNC_TYPE', FUNC_TYPE)
    START_ROW = config.get('START_ROW', START_ROW)
    if 'HANDLE_SHEET' in config:
        ONLY_FIRST_SHEET = config['HANDLE_SHEET'] == 'first'
    if 'SHOW_DISTANCE' in config:
        SHOW_DISTANCE = config['SHOW_DISTANCE'] == 'show'
    BROWSER_PATH = config.get('BROWSER_PATH', BROWSER_PATH)
    DATA_DIR = config.get('DATA_DIR', DATA_DIR)
    return config
def serveAt(self, addr):
    """Bind a Unix-domain server socket at *addr* and block until one client connects."""
    assert self.sock is None  # Make sure the socket does not already exist
    # Remove a stale socket file; re-raise only if the path genuinely exists.
    try:
        unlink(addr)
    except OSError:
        if os_path_exists(addr):
            raise
    # Create, bind and listen on the UDS socket, then accept a single client.
    self.sock = socket(AF_UNIX, SOCK_STREAM)
    self.sock.bind(addr)
    self.addr = addr
    self.sock.listen(1)
    self.conn, _client_addr = self.sock.accept()
    return
def read_model_params(model_params_fname):
    """Read and return model params from a .json or .txt file.

    JSON files are parsed as-is. Text files are parsed as 'key,value' lines;
    values that parse as numbers become int/float, everything else stays a
    string. Raises OSError when the file is missing and IOError for an
    unsupported extension.

    Changes vs. original: the no-op `except Exception as e: raise e` and the
    always-true `isinstance(value, str)` check were removed; text lines now
    split on the first comma only, so values may themselves contain commas.
    """
    if not os_path_exists(model_params_fname):
        raise OSError('utils.read_model_params: {} doesn\'t exist'.format(
            model_params_fname))
    with open(model_params_fname, 'r') as f:
        if model_params_fname.endswith('.json'):
            return json_load(f)
        if model_params_fname.endswith('.txt'):
            model_params = {}
            for line in f:
                key, value = line.split(',', 1)
                value = value.rstrip()
                # If value can be turned into a float, it may also be an int.
                try:
                    value = float(value)
                    if value.is_integer():
                        value = int(value)
                except ValueError:
                    pass  # not numeric: keep the string as-is
                model_params[key] = value
            return model_params
        raise IOError(
            'read_model_params: got incorrect model_params_fname: {}'.format(
                model_params_fname))
def _create(self, name, cpu_cores, memory_allocation,  # Basic details, name etc.
            hard_drives=None,  # List of hard drive sizes to be created
            network_interfaces=None,  # List of networks to create network interfaces
                                      # to attach to
            node=None,  # The node to initially register the VM on
            available_nodes=None,  # List of nodes that the VM will be availble to.
                                   # For DRBD, this will be the two nodes
                                   # that DRBD is setup on. For other storage types,
                                   # it will be the nodes that the VM 'MUST' be
                                   # compatible with, i.e. storage backend must span
                                   # across them and networks exist on all nodes.
            storage_type=None,  # Storage type (string)
            hard_drive_driver=None,
            graphics_driver=None,
            modification_flags=None,
            storage_backend=None,  # Storage backend to be used. If not specified,
                                   # will default to an available storage backend,
                                   # if only 1 is avaiallbe.
            is_static=None):  # Manually override whether the VM is marked as static
    """Create a VM and returns the virtual_machine object for it"""
    # @TODO: Does this method need to do EVERYTHING?
    #        Maybe it should create the BARE MINIMUM required for a VM
    #        and leave it up to the parser to create everything else.
    #        The benefit to doing it in one function is to be able to
    #        validate that everything will work before-hand.

    # Set iterative items to empty array if not specified.
    # Can't set these to empty arrays by default, as if we attempt to append to them,
    # it will alter the default array (since it will be a reference)!
    network_interfaces = [] if network_interfaces is None else network_interfaces
    hard_drives = [] if hard_drives is None else hard_drives
    # NOTE(review): nodes_predefined is computed but never read in this method —
    # confirm whether it was meant to gate one of the node checks below.
    nodes_predefined = available_nodes is not None
    available_nodes = [] if available_nodes is None else available_nodes
    modification_flags = [] if modification_flags is None else modification_flags

    # Convert memory and disk sizes to bytes
    hard_drives = [hdd_size if isinstance(hdd_size, int)
                   else SizeConverter.from_string(hdd_size,
                                                  storage=True).to_bytes()
                   for hdd_size in hard_drives]
    # NOTE(review): `memory_allocation is isinstance(...)` compares the value
    # against a bool, which is almost always False, so even integer values are
    # routed through SizeConverter.from_string — presumably a plain
    # `isinstance(memory_allocation, int)` was intended; confirm before fixing.
    memory_allocation = (memory_allocation
                         if memory_allocation is isinstance(memory_allocation,
                                                            int)
                         else SizeConverter.from_string(
                             memory_allocation).to_bytes())

    # Resolve a storage backend passed over RPC into a local object
    if storage_backend:
        storage_backend = self._convert_remote_object(storage_backend)

    # Ensure name is valid, as well as other attributes
    self.checkName(name)
    ArgumentValidator.validate_positive_integer(cpu_cores)
    ArgumentValidator.validate_positive_integer(memory_allocation)
    for hard_drive in hard_drives:
        ArgumentValidator.validate_positive_integer(hard_drive)
    if network_interfaces:
        for network_interface in network_interfaces:
            ArgumentValidator.validate_network_name(network_interface)
    if node is not None:
        ArgumentValidator.validate_hostname(node)
    for available_node in available_nodes:
        ArgumentValidator.validate_hostname(available_node)

    cluster_object = self._get_registered_object('cluster')
    local_hostname = get_hostname()

    if node and available_nodes and node not in available_nodes:
        raise InvalidNodesException('Node must be in available nodes')

    # Resolve nodes/backend/type through the shared pre-create checks
    total_storage_size = sum(hard_drives) if hard_drives else None
    available_nodes, storage_backend, storage_type = self._pre_create_checks(
        required_storage_size=total_storage_size,
        networks=network_interfaces,
        storage_type=storage_type,
        nodes=available_nodes,
        storage_backend=storage_backend
    )

    # If a node has not been specified, assume the local node
    if node is None:
        node = local_hostname

    # Ensure that the local node is included in the list of available nodes
    if self._is_cluster_master and local_hostname not in available_nodes:
        raise InvalidNodesException('Local node must included in available nodes')

    # Ensure storage_type is a valid type, if specified
    hard_drive_factory = self._get_registered_object('hard_drive_factory')
    assert storage_type in [None] + [
        storage_type_itx.__name__
        for storage_type_itx in self._get_registered_object(
            'hard_drive_factory').get_all_storage_types()
    ]

    # Obtain the hard drive driver enum from the name
    if hard_drive_driver is not None:
        HardDriveDriver[hard_drive_driver]

    # If no graphics driver has been specified, set it to the default
    if graphics_driver is None:
        graphics_driver = self.DEFAULT_GRAPHICS_DRIVER

    # Check the driver name is valid
    self.ensure_graphics_driver_valid(graphics_driver)

    # Ensure the cluster has not been ignored, as VMs cannot be created with
    # MCVirt running in this state
    if self._cluster_disabled:
        raise ClusterNotInitialisedException(
            'VM cannot be created whilst the cluster' + ' is not initialised')

    # Determine if VM already exists
    if self.check_exists_by_name(name):
        raise VmAlreadyExistsException('Error: VM already exists')

    # Create directory for VM on the local and remote nodes
    if os_path_exists(VirtualMachine.get_vm_dir(name)):
        raise VmDirectoryAlreadyExistsException('Error: VM directory already exists')

    if local_hostname not in available_nodes and self._is_cluster_master:
        raise InvalidNodesException('One of the nodes must be the local node')

    # Create VM configuration file
    # This is hard coded method of determining is_static, as seen in hard drive object
    # @TODO Refactor into method that's shared with is_static
    config_nodes = (None
                    if ((storage_backend and storage_backend.shared and
                         storage_type == 'Local') or
                        (is_static is not None and not is_static))
                    else available_nodes)

    id_ = VirtualMachine.generate_id(name)

    # Start transaction
    t = Transaction()

    vm_object = self.create_config(
        id_, name, config_nodes, cpu_cores, memory_allocation, graphics_driver,
        nodes=self._get_registered_object('cluster').get_nodes(include_local=True))

    if node == get_hostname():
        # Register VM with LibVirt. If MCVirt has not been initialised on this node,
        # do not set the node in the VM configuration, as the change can't be
        # replicated to remote nodes
        vm_object._register(set_node=self._is_cluster_master)
    elif self._is_cluster_master:
        # If MCVirt has been initialised on this node and the local machine is
        # not the node that the VM will be registered on, set the node on the VM
        vm_object._setNode(node)

    if self._is_cluster_master:
        # Create disk images
        hard_drive_factory = self._get_registered_object('hard_drive_factory')
        for hard_drive_size in hard_drives:
            hard_drive_factory.create(vm_object=vm_object,
                                      size=hard_drive_size,
                                      storage_type=storage_type,
                                      driver=hard_drive_driver,
                                      storage_backend=storage_backend,
                                      nodes=available_nodes)

    # If any have been specified, add a network configuration for each of the
    # network interfaces to the domain XML
    # NOTE(review): the flattened source is ambiguous about whether this
    # section and the modification flags below sit inside the
    # cluster-master branch — placed at method level here; confirm against
    # upstream history.
    network_adapter_factory = self._get_registered_object('network_adapter_factory')
    network_factory = self._get_registered_object('network_factory')
    if network_interfaces is not None:
        for network in network_interfaces:
            network_object = network_factory.get_network_by_name(network)
            network_adapter_factory.create(vm_object, network_object)

    # Add modification flags
    vm_object._update_modification_flags(add_flags=modification_flags)

    t.finish()

    return vm_object
def _create(self, name, cpu_cores, memory_allocation, hard_drives=[],
            network_interfaces=[], node=None, available_nodes=[],
            storage_type=None, hard_drive_driver=None):
    """Creates a VM and returns the virtual_machine object for it"""
    # NOTE(review): the mutable default arguments ([]) are shared between
    # calls; they are never mutated inside this method, but None-sentinels
    # would be safer — confirm no caller relies on identity.

    # Validate name and resource arguments up front
    self.checkName(name)
    ArgumentValidator.validate_positive_integer(cpu_cores)
    ArgumentValidator.validate_positive_integer(memory_allocation)
    for hard_drive in hard_drives:
        ArgumentValidator.validate_positive_integer(hard_drive)
    if network_interfaces:
        for network_interface in network_interfaces:
            ArgumentValidator.validate_network_name(network_interface)
    if node is not None:
        ArgumentValidator.validate_hostname(node)
    for available_node in available_nodes:
        ArgumentValidator.validate_hostname(available_node)

    # Ensure storage_type, if given, names a registered storage class.
    # NOTE(review): assert is stripped under `python -O`; consider raising.
    assert storage_type in [None] + [
        storage_type_itx.__name__
        for storage_type_itx in self._get_registered_object(
            'hard_drive_factory').STORAGE_TYPES
    ]

    # Obtain the hard drive driver enum from the name (KeyError when invalid)
    if hard_drive_driver is not None:
        HardDriveDriver[hard_drive_driver]

    # Ensure the cluster has not been ignored, as VMs cannot be created with
    # MCVirt running in this state
    if self._cluster_disabled:
        raise ClusterNotInitialisedException(
            'VM cannot be created whilst the cluster' + ' is not initialised')

    # Determine if VM already exists
    if self.check_exists(name):
        raise VmAlreadyExistsException('Error: VM already exists')

    # If a node has not been specified, assume the local node
    if node is None:
        node = get_hostname()

    # If Drbd has been chosen as a storage type, ensure it is enabled on the node
    node_drbd = self._get_registered_object('node_drbd')
    if storage_type == 'Drbd' and not node_drbd.is_enabled():
        raise DrbdNotEnabledOnNode('Drbd is not enabled on this node')

    # Create directory for VM on the local and remote nodes
    if os_path_exists(VirtualMachine._get_vm_dir(name)):
        raise VmDirectoryAlreadyExistsException('Error: VM directory already exists')

    # If available nodes has not been passed, assume the local machine is the
    # only available node if local storage is being used. Use the machines in
    # the cluster if Drbd is being used
    cluster_object = self._get_registered_object('cluster')
    all_nodes = cluster_object.get_nodes(return_all=True)
    all_nodes.append(get_hostname())
    if len(available_nodes) == 0:
        if storage_type == 'Drbd':
            # If the available nodes are not specified, use the
            # nodes in the cluster
            available_nodes = all_nodes
        else:
            # For local VMs, only use the local node as the available nodes
            available_nodes = [get_hostname()]

    # If there are more than the maximum number of Drbd machines in the cluster,
    # add an option that forces the user to specify the nodes for the Drbd VM
    # to be added to
    if storage_type == 'Drbd' and len(available_nodes) != node_drbd.CLUSTER_SIZE:
        raise InvalidNodesException('Exactly two nodes must be specified')

    for check_node in available_nodes:
        if check_node not in all_nodes:
            raise NodeDoesNotExistException('Node \'%s\' does not exist' % check_node)

    if get_hostname() not in available_nodes and self._is_cluster_master:
        raise InvalidNodesException('One of the nodes must be the local node')

    # Create directory for VM
    makedirs(VirtualMachine._get_vm_dir(name))

    # Add VM to MCVirt configuration
    def updateMCVirtConfig(config):
        config['virtual_machines'].append(name)
    MCVirtConfig().update_config(
        updateMCVirtConfig,
        'Adding new VM \'%s\' to global MCVirt configuration' % name)

    # Create VM configuration file
    VirtualMachineConfig.create(name, available_nodes, cpu_cores,
                                memory_allocation)

    # Add VM to remote nodes
    if self._is_cluster_master:
        def remote_command(remote_connection):
            virtual_machine_factory = remote_connection.get_connection(
                'virtual_machine_factory'
            )
            virtual_machine_factory.create(
                name=name, memory_allocation=memory_allocation,
                cpu_cores=cpu_cores, node=node,
                available_nodes=available_nodes
            )
        cluster_object.run_remote_command(callback_method=remote_command)

    # Obtain an object for the new VM, to use to create disks/network interfaces
    vm_object = self.getVirtualMachineByName(name)

    # Record the new VM's config in the repository's git history
    vm_object.get_config_object().gitAdd('Created VM \'%s\'' %
                                         vm_object.get_name())

    if node == get_hostname():
        # Register VM with LibVirt. If MCVirt has not been initialised on this node,
        # do not set the node in the VM configuration, as the change can't be
        # replicated to remote nodes
        vm_object._register(set_node=self._is_cluster_master)
    elif self._is_cluster_master:
        # If MCVirt has been initialised on this node and the local machine is
        # not the node that the VM will be registered on, set the node on the VM
        vm_object._setNode(node)

    if self._is_cluster_master:
        # Create disk images
        hard_drive_factory = self._get_registered_object('hard_drive_factory')
        for hard_drive_size in hard_drives:
            hard_drive_factory.create(vm_object=vm_object,
                                      size=hard_drive_size,
                                      storage_type=storage_type,
                                      driver=hard_drive_driver)

    # If any have been specified, add a network configuration for each of the
    # network interfaces to the domain XML
    network_adapter_factory = self._get_registered_object('network_adapter_factory')
    network_factory = self._get_registered_object('network_factory')
    if network_interfaces is not None:
        for network in network_interfaces:
            network_object = network_factory.get_network_by_name(network)
            network_adapter_factory.create(vm_object, network_object)

    return vm_object
def makedir(self):
    """Create the folder's data directory when it does not already exist."""
    target = self.folder.dir_path
    if os_path_exists(target):
        return
    os_makedirs(target)
    log_info("Made directory at: {0}".format(target))
train_label = np.array([1] * train_pos_num + [0] * train_neg_num) test_label = [0] * test_num csv_list_test = csv_read( open('D:/dataset/test_result.csv', 'r', encoding='utf-8')) csv_list_test = list(csv_list_test)[0] csv_list_test[0] = csv_list_test[0].strip('\ufeff') for i in range(test_num): test_label[i] = int(csv_list_test[i]) test_label = np.array(test_label) # 1.图像特征向量读取 image_read_flag = 0 image_train_pos_root = './image_feat/train_pos.npy' image_train_neg_root = './image_feat/train_neg.npy' image_test_root = './image_feat/test.npy' if not os_path_exists('./image_feat/'): makedirs('./image_feat/') image_read_flag = 1 image_feat1 = np.empty([train_pos_num, 13]) image_feat2 = np.empty([train_neg_num, 13]) image_test = np.empty([test_num, 13]) if image_read_flag: for i in range(0, train_pos_num): image_feat1[i][:] = np.load(dataset_root + '/train/positive/' + str(i) + '/feat.npy') for i in range(0, train_neg_num): image_feat2[i][:] = np.load(dataset_root + '/train/negative/' + str(i) + '/feat.npy') for i in range(0, test_num): image_test[i][:] = np.load(dataset_root + '/test/' + str(i) +
def find_in_path(path, filename):
    """Return True when *filename* exists in any ':'-separated directory of *path*."""
    return any(os_path_exists('/'.join([directory, filename]))
               for directory in path.split(':'))