def setRole(baseDir: str, nickname: str, domain: str,
            project: str, role: str) -> bool:
    """Set a person's role within a project

    Setting the role to an empty string or None will remove it.
    Returns True on success, False if the inputs are too long or the
    actor file does not exist.
    """
    # avoid giant strings (role may legitimately be None when removing)
    if role and len(role) > 128:
        return False
    if len(project) > 128:
        return False
    actorFilename = baseDir + '/accounts/' + nickname + '@' + domain + '.json'
    if not os.path.isfile(actorFilename):
        return False
    with open(actorFilename, 'r') as fp:
        actorJson = commentjson.load(fp)
    if role:
        # add the role
        # NOTE: previously compared the literal 'role' == 'moderator',
        # which was always False, so moderators were never registered
        if project == 'instance' and role == 'moderator':
            addModerator(baseDir, nickname, domain)
        if actorJson['roles'].get(project):
            if role not in actorJson['roles'][project]:
                actorJson['roles'][project].append(role)
        else:
            actorJson['roles'][project] = [role]
    else:
        # remove the role
        if project == 'instance':
            removeModerator(baseDir, nickname)
        if actorJson['roles'].get(project):
            # guard the removal: remove(role) on a missing value raises
            if role in actorJson['roles'][project]:
                actorJson['roles'][project].remove(role)
            # if the project contains no roles then remove it
            if len(actorJson['roles'][project]) == 0:
                del actorJson['roles'][project]
    with open(actorFilename, 'w') as fp:
        commentjson.dump(actorJson, fp, indent=4, sort_keys=False)
    return True
def save_config(config, save_dir):
    """Serialize *config* as pretty-printed JSON to <save_dir>/config.json,
    creating the directory first if it is missing."""
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    target = os.path.join(save_dir, 'config.json')
    with open(target, 'w') as handle:
        json.dump(config, handle, indent=4, sort_keys=True)
def expireShares(baseDir: str, nickname: str, domain: str) -> None:
    """Removes expired items from an account's shares, together with any
    associated share images, rewriting shares.json only when something
    was actually removed."""
    handleDomain = domain.split(':')[0] if ':' in domain else domain
    handle = nickname + '@' + handleDomain
    sharesFilename = baseDir + '/accounts/' + handle + '/shares.json'
    if not os.path.isfile(sharesFilename):
        return
    with open(sharesFilename, 'r') as sharesFile:
        sharesJson = commentjson.load(sharesFile)
    currTime = int(time.time())
    expired = [itemID for itemID, item in sharesJson.items()
               if currTime > item['expire']]
    if not expired:
        return
    for itemID in expired:
        del sharesJson[itemID]
        # remove any associated images
        imageBase = baseDir + '/sharefiles/' + nickname + '/' + itemID
        for ext in ('.png', '.jpg', '.gif'):
            if os.path.isfile(imageBase + ext):
                os.remove(imageBase + ext)
    with open(sharesFilename, 'w') as sharesFile:
        commentjson.dump(sharesJson, sharesFile, indent=4, sort_keys=True)
def savePlayer(player, masterDB, path = str(Config.get('Players', 'Location')) + "/"):
    """Persist a player's state to its <name>.player file.

    Finds the existing file case-insensitively, loads it, overwrites every
    field except 'pwd' with the values from *player*, then writes it back.

    NOTE(review): the default *path* is evaluated once at import time from
    Config — later Config changes won't affect it; confirm that's intended.
    NOTE(review): rebinding *masterDB* at the end only changes the local
    name, not the caller's object — presumably the caller reloads too.
    """
    #print(path)
    DB = loadPlayersDB(forceLowercase = False)
    for p in DB:
        # case-insensitive match against the stored filename
        if (player['name'] + ".player").lower() == p.lower():
            #print("found the file")
            #print(p)
            with open(path + p, "r") as read_file:
                temp = commentjson.load(read_file)
            #print(temp)
            silentRemove(path + player['name'] + ".player")
            #print("removed file")
            newPlayer = deepcopy(temp)
            #print(newPlayer)
            # the stored password is carried over unchanged
            #newPlayer['pwd'] = hash_password(temp['pwd'])
            newPlayer['pwd'] = temp['pwd']
            for key in newPlayer:
                if key != "pwd":
                    # print(key)
                    newPlayer[key] = player[key]
            #print(newPlayer)
            #print("Saving player state")
            with open(path + player['name'] + ".player", 'w') as fp:
                commentjson.dump(newPlayer, fp)
            #print("Updating playerd DB")
            masterDB = loadPlayersDB()
def capabilitiesReceiveUpdate(baseDir: str,
                              nickname: str, domain: str, port: int,
                              actor: str,
                              newCapabilitiesId: str,
                              capabilityList: list, debug: bool) -> bool:
    """An update for a capability of the given actor has arrived.

    Rewrites the stored granted-capabilities file with the new id and
    capability list.  Returns True on success, False if the capabilities
    file cannot be located.
    """
    ocapFilename = \
        getOcapFilename(baseDir, nickname, domain, actor, 'granted')
    if not ocapFilename:
        return False
    if not os.path.isfile(ocapFilename):
        if debug:
            print('DEBUG: capabilities file not found during update')
            print(ocapFilename)
        return False
    with open(ocapFilename, 'r') as fp:
        ocapJson = commentjson.load(fp)
    ocapJson['id'] = newCapabilitiesId
    ocapJson['capability'] = capabilityList
    with open(ocapFilename, 'w') as fp:
        commentjson.dump(ocapJson, fp, indent=4, sort_keys=False)
    # previously an unreachable `return False` followed this return
    return True
def createConfig(baseDir: str) -> None:
    """Creates an empty configuration file if one does not exist yet."""
    configFilename = baseDir + '/config.json'
    if os.path.isfile(configFilename):
        return
    with open(configFilename, 'w') as configFile:
        commentjson.dump({}, configFile, indent=4, sort_keys=True)
def setSkills(baseDir: str, nickname: str, domain: str, skills: {}) -> bool:
    """Replace the skills dict for a person.

    Returns True on success, False if the actor file does not exist
    (previously the annotation said None while one path returned False;
    now consistently bool like the sibling setters).
    """
    actorFilename = baseDir + '/accounts/' + nickname + '@' + domain + '.json'
    if not os.path.isfile(actorFilename):
        return False
    with open(actorFilename, 'r') as fp:
        actorJson = commentjson.load(fp)
    actorJson['skills'] = skills
    with open(actorFilename, 'w') as fp:
        commentjson.dump(actorJson, fp, indent=4, sort_keys=False)
    return True
def capabilitiesAccept(baseDir: str, httpPrefix: str,
                       nickname: str, domain: str, port: int,
                       acceptedActor: str, saveToFile: bool,
                       acceptedCaps=None) -> {}:
    """Accept a capability request from an actor.

    Returns the Capability object which gets sent back to the
    capabilities requester (could also be added to a follow Accept
    activity), or None on failure.  If a capability for this actor was
    already stored it is loaded and returned rather than recreated.
    """
    if acceptedCaps is None:
        # default granted capabilities; previously a mutable default
        # argument (shared list across calls)
        acceptedCaps = ["inbox:write", "objects:read"]
    # reject excessively long actors
    if len(acceptedActor) > 256:
        return None
    fullDomain = domain
    if port:
        if port != 80 and port != 443:
            if ':' not in domain:
                fullDomain = domain + ':' + str(port)
    # make directories to store capabilities
    ocapFilename = \
        getOcapFilename(baseDir, nickname, fullDomain, acceptedActor, 'accept')
    if not ocapFilename:
        return None
    ocapAccept = None
    # if the capability already exists then load it from file
    if os.path.isfile(ocapFilename):
        with open(ocapFilename, 'r') as fp:
            ocapAccept = commentjson.load(fp)
    # otherwise create a new capability
    if not ocapAccept:
        acceptedActorNickname = getNicknameFromActor(acceptedActor)
        if not acceptedActorNickname:
            print('WARN: unable to find nickname in ' + acceptedActor)
            return None
        acceptedActorDomain, acceptedActorPort = \
            getDomainFromActor(acceptedActor)
        # capability ids embed the granting password so they are unguessable
        if acceptedActorPort:
            ocapId = acceptedActorNickname + '@' + acceptedActorDomain + ':' + \
                str(acceptedActorPort) + '#' + createPassword(32)
        else:
            ocapId = acceptedActorNickname + '@' + acceptedActorDomain + '#' + \
                createPassword(32)
        ocapAccept = {
            "@context": "https://www.w3.org/ns/activitystreams",
            "id": httpPrefix + "://" + fullDomain + "/caps/" + ocapId,
            "type": "Capability",
            "capability": acceptedCaps,
            "scope": acceptedActor,
            "actor": httpPrefix + "://" + fullDomain
        }
        if nickname:
            ocapAccept['actor'] = \
                httpPrefix + "://" + fullDomain + '/users/' + nickname
    if saveToFile:
        with open(ocapFilename, 'w') as fp:
            commentjson.dump(ocapAccept, fp, indent=4, sort_keys=False)
    return ocapAccept
def convert_and_save_phreeqc_based(fname):
    """Convert a PHREEQC database file into two JSON reaction files:
    one for solution species and one for solid phases."""
    frame = read_phreeqc_as_df(fname)
    outputs = {
        "reactions_solutions.json": create_db_reaction_entries_solutions(frame),
        "reactions_solids.json": create_db_reaction_entries_phases(frame),
    }
    for out_name, entries in outputs.items():
        with open(out_name, "w") as json_file:
            json.dump(entries, json_file)
def SaveToJsonFile(obj, json_file_name, sort_keys=True):
    """Write *obj* as pretty-printed UTF-8 JSON to *json_file_name*,
    creating any missing parent directories first."""
    parent = pathlib.Path(json_file_name).parent
    parent.mkdir(parents=True, exist_ok=True)
    with codecs.open(json_file_name, 'w', encoding='utf-8') as outfile:
        json.dump(obj, outfile, indent=4, ensure_ascii=False,
                  sort_keys=sort_keys, default=_save_to_json_process_)
def setToLight(self, setupJson, setting_filename):
    """Switch the colour scheme in *setupJson* to its light variant (when
    both keys exist) and write the settings file back out regardless."""
    if "color_scheme" not in setupJson:
        print("no color scheme found in file: " + setting_filename)
    elif "color_scheme_light" not in setupJson:
        print("no light color scheme found")
    else:
        print("setting color scheme to contents of color_scheme_light")
        setupJson["color_scheme"] = setupJson["color_scheme_light"]
    target = os.path.join(users_path, setting_filename)
    with open(target, "w") as settingsFile:
        commentjson.dump(setupJson, settingsFile, indent=4, sort_keys=True)
def setConfigParam(baseDir: str, variableName: str, variableValue) -> None:
    """Store a single configuration value, creating config.json first
    if it does not already exist."""
    createConfig(baseDir)
    configFilename = baseDir + '/config.json'
    with open(configFilename, 'r') as cfgFile:
        configJson = commentjson.load(cfgFile)
    configJson[variableName] = variableValue
    with open(configFilename, 'w') as cfgFile:
        commentjson.dump(configJson, cfgFile, indent=4, sort_keys=True)
def _merge_launch_json(self, new_launch_json):
    """Merge *new_launch_json*'s configurations into .vscode/launch.json,
    creating the file outright when it does not exist yet."""
    vscode_dir = os.path.join(os.getcwd(), ".vscode")
    self.utility.ensure_dir(vscode_dir)
    launch_json_file = os.path.join(vscode_dir, "launch.json")
    if not os.path.exists(launch_json_file):
        merged = new_launch_json
    else:
        merged = commentjson.loads(
            self.utility.get_file_contents(launch_json_file))
        merged['configurations'].extend(new_launch_json['configurations'])
    with open(launch_json_file, "w") as f:
        commentjson.dump(merged, f, indent=2)
def write_template_to_file(self):
    """
    Serializes self.template to string and writes it to the file named
    in config['global']['output']
    """
    # pretty-print only when debug output is enabled
    indent = 4 if self.config['global']['print_debug'] else 0
    with open(self.config['global']['output'], 'w') as output_file:
        # to_template_json() also loads child templates into S3
        raw_json = self.template.to_template_json()
        reloaded_template = json.loads(raw_json)
        json.dump(reloaded_template, output_file, indent=indent,
                  separators=(',', ':'))
def test_dump(self):
    """commentjson.dump must write exactly what json.dumps produces."""
    test_dict = dict(a=1, b=2)
    with open(os.path.join(self.path, 'test.json'), 'w') as wfp:
        commentjson.dump(test_dict, wfp)
    j_dump = json.dumps(test_dict)
    with open(os.path.join(self.path, 'test.json'), 'r') as rfp:
        # the old `assert rfp.read(), j_dump` only checked the file was
        # non-empty (j_dump was the assert *message*); compare contents
        assert rfp.read() == j_dump
def rewrite_sample_configuration(sample_root, hostname, port_number):
    """Open sample project configuration file and rewrite server hostname,
    port number, username file path and password file path.
    """
    test_configuration = path.join(sample_root, 'config_development.jsonc')
    with open(test_configuration, 'r') as f:
        config = json.load(f)
    backend = config['BACKEND']
    backend['target_server_hostname'] = hostname
    backend['sql_port'] = port_number
    backend['username_file'] = path.join(sample_root, 'username')
    backend['password_file'] = path.join(sample_root, 'password')
    with open(test_configuration, 'w') as f:
        json.dump(config, f)
def set(self, key, value=None):
    """Set one setting (key, value) or merge a dict of settings (key only,
    value omitted), updating the in-memory settings, the matching instance
    attributes and the JSON settings file.

    BUG FIX: the old code opened the file with 'w+', which truncates it
    *before* json.load runs, so loading always failed.  Read first, then
    rewrite.
    """
    try:
        with open(self.pathname, "r") as f:
            extraSettings = json.load(f)
    except (FileNotFoundError, ValueError):
        # missing or empty/corrupt file: start from scratch
        extraSettings = {}
    if value:
        self.settings[key] = value
        extraSettings[key] = value
        setattr(self, key, self.get(key))
    else:
        # *key* is a mapping of several settings to merge in
        self.settings.update(key)
        extraSettings.update(key)
        for i in key:
            setattr(self, i, self.get(i))
    with open(self.pathname, "w") as f:
        json.dump(extraSettings, f, indent=4, sort_keys=True)
def update_codio():
    """Ensure the codio preview section has a 'Virtual Desktop' entry
    (placed first when newly added) pointing at the domain3000 URL,
    then write the run file back and return the updated structure."""
    codio_json = read_codio_json()
    preview_section = codio_json.get('preview', OrderedDict())
    if 'Virtual Desktop' in preview_section:
        preview_section['Virtual Desktop'] = 'https://{{domain3000}}/'
    else:
        # list(...) is required: on Python 3 dict.items() is a view and
        # cannot be concatenated to a list with `+`
        preview_section = OrderedDict(
            [('Virtual Desktop', 'https://{{domain3000}}/')] +
            list(preview_section.items()))
    codio_json['preview'] = preview_section
    with open(run_file, 'w+') as file:
        json.dump(codio_json, file, indent=4, separators=(',', ': '))
    return codio_json
def save(self):
    """
    Saves the file at the already provided path

    currently unused.

    Warns
    ----
    On failed save
    """
    try:
        with open(self.config_path, 'w') as file:
            json.dump(self, file, indent=4, sort_keys=True)
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; still best-effort, so only warn
        warnings.warn("Error: Configuration not saved")
def updateAnnounceCollection(postFilename: str, actor: str, debug: bool) -> None:
    """Updates the announcements collection within a post

    Confusingly this is known as "shares", but isn't the same as
    shared items within shares.py. It's shares of posts, not shares
    of physical objects.
    """
    with open(postFilename, 'r') as fp:
        postJsonObject = commentjson.load(fp)
    if not postJsonObject.get('object'):
        if debug:
            pprint(postJsonObject)
            # fixed: the old message referenced an undefined variable
            # `announceUrl`, which raised NameError in debug mode
            print('DEBUG: post ' + postFilename + ' has no object')
        return
    if not isinstance(postJsonObject['object'], dict):
        return
    postUrl = postJsonObject['id'].replace('/activity', '') + '/shares'
    if not postJsonObject['object'].get('shares'):
        # create the initial collection with this announce as sole item
        if debug:
            print('DEBUG: Adding initial shares (announcements) to ' + postUrl)
        announcementsJson = {
            "@context": "https://www.w3.org/ns/activitystreams",
            'id': postUrl,
            'type': 'Collection',
            "totalItems": 1,
            'items': [{
                'type': 'Announce',
                'actor': actor
            }]
        }
        postJsonObject['object']['shares'] = announcementsJson
    else:
        if postJsonObject['object']['shares'].get('items'):
            # don't add a duplicate announce for the same actor
            for announceItem in postJsonObject['object']['shares']['items']:
                if announceItem.get('actor'):
                    if announceItem['actor'] == actor:
                        return
            newAnnounce = {
                'type': 'Announce',
                'actor': actor
            }
            postJsonObject['object']['shares']['items'].append(newAnnounce)
            postJsonObject['object']['shares']['totalItems'] = \
                len(postJsonObject['object']['shares']['items'])
        else:
            if debug:
                print('DEBUG: shares (announcements) section of post has no items list')
    if debug:
        print('DEBUG: saving post with shares (announcements) added')
        pprint(postJsonObject)
    with open(postFilename, 'w') as fp:
        commentjson.dump(postJsonObject, fp, indent=4, sort_keys=True)
def test_dump_with_kwargs(self):
    """dump must forward keyword arguments (e.g. indent) to json.dump."""
    test_dict = dict(a=1, b=2)
    test_kwargs = dict(indent=4)
    with open(os.path.join(self.path, 'test.json'), 'w') as wfp:
        commentjson.dump(test_dict, wfp, **test_kwargs)
    with open(os.path.join(self.path, 'test.json'), 'r') as rfp:
        c_dump = rfp.read()
    j_dump = json.dumps(test_dict, **test_kwargs)
    assert c_dump == j_dump, c_dump
def capabilitiesGrantedSave(baseDir: str,
                            nickname: str, domain: str, ocap: {}) -> bool:
    """A capabilities accept has been received, so store it for
    reference when sending to the actor."""
    grantingActor = ocap.get('actor')
    if not grantingActor:
        return False
    grantedFilename = \
        getOcapFilename(baseDir, nickname, domain, grantingActor, 'granted')
    if not grantedFilename:
        return False
    with open(grantedFilename, 'w') as grantedFile:
        commentjson.dump(ocap, grantedFile, indent=4, sort_keys=False)
    return True
def storeFollowRequest(baseDir: str,
                       nicknameToFollow: str, domainToFollow: str, port: int,
                       nickname: str, domain: str, fromPort: int,
                       followJson: {},
                       debug: bool) -> bool:
    """Stores the follow request for later use

    Returns True if the request was recorded (or the account is already
    a follower), False if the target account does not exist.
    """
    accountsDir = baseDir + '/accounts/' + \
        nicknameToFollow + '@' + domainToFollow
    if not os.path.isdir(accountsDir):
        return False
    approveHandle = nickname + '@' + domain
    if fromPort:
        if fromPort != 80 and fromPort != 443:
            if ':' not in domain:
                approveHandle = nickname + '@' + domain + ':' + str(fromPort)
    # already a follower? then there is nothing to store
    followersFilename = accountsDir + '/followers.txt'
    if os.path.isfile(followersFilename):
        # use `with` so the handle is closed (was a bare open().read())
        with open(followersFilename, 'r') as followersFile:
            alreadyFollowing = approveHandle in followersFile.read()
        if alreadyFollowing:
            if debug:
                print('DEBUG: ' +
                      nicknameToFollow + '@' + domainToFollow +
                      ' already following ' + approveHandle)
            return True
    # add to a file which contains a list of requests
    approveFollowsFilename = accountsDir + '/followrequests.txt'
    if os.path.isfile(approveFollowsFilename):
        with open(approveFollowsFilename, 'r') as approveFile:
            alreadyRequested = approveHandle in approveFile.read()
        if not alreadyRequested:
            with open(approveFollowsFilename, "a") as fp:
                fp.write(approveHandle + '\n')
        else:
            if debug:
                print('DEBUG: ' + approveHandle + ' is already awaiting approval')
    else:
        with open(approveFollowsFilename, "w") as fp:
            fp.write(approveHandle + '\n')
    # store the follow request in its own directory
    # We don't rely upon the inbox because items in there could expire
    requestsDir = accountsDir + '/requests'
    if not os.path.isdir(requestsDir):
        os.mkdir(requestsDir)
    followActivityfilename = requestsDir + '/' + approveHandle + '.follow'
    with open(followActivityfilename, 'w') as fp:
        commentjson.dump(followJson, fp, indent=4, sort_keys=False)
    # previously an unreachable `return False` followed this return
    return True
def setAvailability(baseDir: str, nickname: str, domain: str,
                    status: str) -> bool:
    """Set an availability status for an account.  Fails if the status
    string is too long or the actor file is missing."""
    # avoid giant strings
    if len(status) > 128:
        return False
    actorFilename = baseDir + '/accounts/' + nickname + '@' + domain + '.json'
    if not os.path.isfile(actorFilename):
        return False
    with open(actorFilename, 'r') as actorFile:
        actorJson = commentjson.load(actorFile)
    actorJson['availability'] = status
    with open(actorFilename, 'w') as actorFile:
        commentjson.dump(actorJson, actorFile, indent=4, sort_keys=False)
    return True
def flatten_lang(input_dir, output_dir):
    """Flatten the nested keys of every .json lang file in *input_dir*
    and write the results to same-named files in *output_dir*."""
    print("Flattening lang files...", end="", flush=True)
    count = 0
    key_count = 0
    lang_files = [f for f in os.listdir(input_dir) if f.endswith(".json")]
    for filename in lang_files:
        with open(path.join(input_dir, filename)) as lang_file:
            nested = commentjson.load(lang_file)
        flattened = {}
        key_count += recursive_parse(nested, flattened, "")
        # TODO configurable outfile?
        with open(path.join(output_dir, filename), "w") as target:
            commentjson.dump(flattened, target, sort_keys=True)
        count += 1
    print("\rFlattened {} lang files with {} keys".format(count, key_count))
def setDisplayNickname(baseDir: str, nickname: str, domain: str,
                       displayName: str) -> bool:
    """Set the display name for an account.  Fails when the name is too
    long, the account file is missing, or the stored JSON is empty."""
    if len(displayName) > 32:
        return False
    handle = nickname.lower() + '@' + domain.lower()
    filename = baseDir + '/accounts/' + handle.lower() + '.json'
    if not os.path.isfile(filename):
        return False
    with open(filename, 'r') as accountFile:
        personJson = commentjson.load(accountFile)
    if not personJson:
        return False
    personJson['name'] = displayName
    with open(filename, 'w') as accountFile:
        commentjson.dump(personJson, accountFile, indent=4, sort_keys=False)
    return True
def undoAnnounceCollectionEntry(postFilename: str,actor: str, \
                                debug: bool) -> None:
    """Undoes an announce for a particular actor by removing it from the
    "shares" collection within a post. Note that the "shares" collection
    has no relation to shared items in shares.py. It's shares of posts,
    not shares of physical objects.

    The post file is only rewritten if a matching announce was found.
    """
    with open(postFilename, 'r') as fp:
        postJsonObject=commentjson.load(fp)
        # announces only apply to wrapped Create activities
        if not postJsonObject.get('type'):
            return
        if postJsonObject['type']!='Create':
            return
        if not postJsonObject.get('object'):
            if debug:
                pprint(postJsonObject)
                print('DEBUG: post has no object')
            return
        if not isinstance(postJsonObject['object'], dict):
            return
        # nothing to undo if there is no shares collection with items
        if not postJsonObject['object'].get('shares'):
            return
        if not postJsonObject['object']['shares'].get('items'):
            return
        totalItems=0
        if postJsonObject['object']['shares'].get('totalItems'):
            totalItems=postJsonObject['object']['shares']['totalItems']
        itemFound=False
        for announceItem in postJsonObject['object']['shares']['items']:
            if announceItem.get('actor'):
                if announceItem['actor']==actor:
                    if debug:
                        print('DEBUG: Announce was removed for '+actor)
                    # removing while iterating is safe here because we
                    # break out of the loop immediately afterwards
                    postJsonObject['object']['shares']['items'].remove(announceItem)
                    itemFound=True
                    break
        if itemFound:
            if totalItems==1:
                if debug:
                    print('DEBUG: shares (announcements) was removed from post')
                # the collection is now empty, so drop it entirely
                del postJsonObject['object']['shares']
            else:
                postJsonObject['object']['shares']['totalItems']= \
                    len(postJsonObject['object']['shares']['items'])
            with open(postFilename, 'w') as fp:
                commentjson.dump(postJsonObject, fp, indent=4, sort_keys=True)
def setBio(baseDir: str, nickname: str, domain: str, bio: str) -> bool:
    """Set the bio (summary) for an account.  Fails when the bio is too
    long, the account file is missing, or it has no existing summary."""
    if len(bio) > 32:
        return False
    handle = nickname.lower() + '@' + domain.lower()
    filename = baseDir + '/accounts/' + handle.lower() + '.json'
    if not os.path.isfile(filename):
        return False
    with open(filename, 'r') as accountFile:
        personJson = commentjson.load(accountFile)
    if not personJson:
        return False
    if not personJson.get('summary'):
        return False
    personJson['summary'] = bio
    with open(filename, 'w') as accountFile:
        commentjson.dump(personJson, accountFile, indent=4, sort_keys=False)
    return True
def setOrganizationScheme(baseDir: str, nickname: str, domain: str,
                          schema: str) -> bool:
    """Set the organization schema within which a person exists.
    This defines how roles, skills and availability are assembled
    into organizations."""
    # avoid giant strings
    if len(schema) > 256:
        return False
    actorFilename = baseDir + '/accounts/' + nickname + '@' + domain + '.json'
    if not os.path.isfile(actorFilename):
        return False
    with open(actorFilename, 'r') as actorFile:
        actorJson = commentjson.load(actorFile)
    actorJson['orgSchema'] = schema
    with open(actorFilename, 'w') as actorFile:
        commentjson.dump(actorJson, actorFile, indent=4, sort_keys=False)
    return True
def _map_station_airport(self):
    """Build the mapping between rail stations and airports.

    Returns (station_to_airports, airport_to_station) and also writes the
    airport-code -> station-code mapping to airport2station.json.
    Matching is by simplified name, overridden by the manual corrections
    file rail/mapping_names.json.
    """
    # index airports by their simplified name for name-based matching
    airports_by_simple_name: Dict[str, List[Airport]] = {}
    for airport in self.fly.airports.values():
        if airport.simple_name not in airports_by_simple_name:
            airports_by_simple_name[airport.simple_name] = []
        airports_by_simple_name[airport.simple_name].append(airport)
    # manual corrections take precedence over name matching
    with open("rail/mapping_names.json", "r") as f:
        mapping_corrections: Dict[str, List[str]] = commentjson.load(f)
    station_to_airports: Dict[Station, List[Airport]] = {}
    for station in self.rail.station_list:
        if station.code in mapping_corrections:
            airport_codes = [
                airport_code
                for airport_code in mapping_corrections[station.code]
            ]
            station_to_airports[station] = []
            for airport_code in airport_codes:
                try:
                    station_to_airports[station].append(
                        self.fly.airports[airport_code])
                except KeyError:
                    continue  # this Airport was not important enough to make the cut.
        elif station.simple_name in airports_by_simple_name:
            station_to_airports[station] = airports_by_simple_name[
                station.simple_name]
        else:
            station_to_airports[station] = []
    # invert the mapping; if two stations claim one airport, the later
    # station in iteration order wins
    airport_to_station: Dict[Airport, Station] = {
        airport: station
        for station, airports in station_to_airports.items()
        for airport in airports
    }
    with open("airport2station.json", "w") as f:
        commentjson.dump(
            {
                airport.code: station.code
                for airport, station in airport_to_station.items()
            }, f)
    return station_to_airports, airport_to_station
def Apply_Changes():
    """Apply the GUI-selected preferences to the profile chosen in the
    dropdown and write the Windows Terminal settings file back out.

    NOTE(review): this source was recovered from a collapsed one-line
    form; the nesting of a few statements (marked below) should be
    confirmed against the original layout.
    """
    global Choose_background_transparency, no_transparency_var
    global data, default_shell_checkbox_var
    global Background_color_hexStr
    global chooseCmd, next, CmdNum
    cmdName = Cmdvariable.get()
    # locate the selected profile by name; CmdNum is left pointing at it
    for CmdNum in range(len(cmdLinePref) - 1):
        if cmdName == cmdLinePref[CmdNum]['name']:
            break
    # only apply a colour scheme the terminal actually knows about
    if ClrScheme_var.get() in COLOR_SCHEMES:
        cmdLinePref[CmdNum]['colorScheme'] = ClrScheme_var.get()
    cmdLinePref[CmdNum]['hidden'] = hidden_checkbox_var.get()
    if No_background_checkbox_var.get(
    ) == False and not Background_color_hexStr == "":
        cmdLinePref[CmdNum]['background'] = Background_color_hexStr
    else:
        if 'background' in cmdLinePref[CmdNum]:
            del cmdLinePref[CmdNum]['background']
    cmdLinePref[CmdNum]['fontFace'] = font['family']
    cmdLinePref[CmdNum]['fontSize'] = font['size']
    id = ""
    if default_shell_checkbox_var.get():
        id = cmdLinePref[CmdNum]['guid']
    # NOTE(review): assumed to run unconditionally (clearing the default
    # profile when the checkbox is off) — otherwise the id = "" above
    # would be dead; confirm against the original indentation
    data['defaultProfile'] = id
    print(no_transparency_var.get())
    if no_transparency_var.get():
        cmdLinePref[CmdNum]['useAcrylic'] = False
    else:
        cmdLinePref[CmdNum]['useAcrylic'] = True
        cmdLinePref[CmdNum]['acrylicOpacity'] = float(
            Choose_background_transparency.get() / 100)
    if background_img_checkbox_var.get():
        cmdLinePref[CmdNum][
            'backgroundImage'] = "ms-appdata:///roaming/yourimage" + Image_extension
    else:
        if 'backgroundImage' in cmdLinePref[CmdNum]:
            del cmdLinePref[CmdNum]['backgroundImage']
    with open(pathToSettingFile, 'w') as outfile:
        commentjson.dump(data, outfile)
def clearModeratorStatus(baseDir: str) -> None:
    """Removes moderator status from all accounts
    This could be slow if there are many users, but only happens
    rarely when moderators are appointed or removed
    """
    directory = os.fsencode(baseDir + '/accounts/')
    for f in os.listdir(directory):
        filename = os.fsdecode(f)
        if not filename.endswith(".json") or '@' not in filename:
            continue
        filename = os.path.join(baseDir + '/accounts/', filename)
        # cheap text scan before parsing the whole file; use `with` so
        # the handle is closed (was a bare open().read())
        with open(filename, 'r') as actorFile:
            if '"moderator"' not in actorFile.read():
                continue
        with open(filename, 'r') as fp:
            actorJson = commentjson.load(fp)
        if actorJson['roles'].get('instance'):
            if 'moderator' in actorJson['roles']['instance']:
                actorJson['roles']['instance'].remove('moderator')
                with open(filename, 'w') as fp:
                    commentjson.dump(actorJson, fp,
                                     indent=4, sort_keys=False)
def test_dump(self):
    """commentjson.dump must write exactly what json.dumps produces."""
    test_dict = dict(a=1, b=2)
    with open(os.path.join(self.path, 'test.json'), 'w') as wfp:
        # dump returns None, so the old `c_dump = ...` binding was unused
        commentjson.dump(test_dict, wfp)
    j_dump = json.dumps(test_dict)
    with open(os.path.join(self.path, 'test.json'), 'r') as rfp:
        # the old `assert rfp.read(), j_dump` only checked the file was
        # non-empty (j_dump was the assert *message*); compare contents
        assert rfp.read() == j_dump
def setSkillLevel(baseDir: str, nickname: str, domain: str,
                  skill: str, skillLevelPercent: int) -> bool:
    """Set a skill level for a person
    Setting skill level to zero removes it
    """
    if skillLevelPercent < 0 or skillLevelPercent > 100:
        return False
    actorFilename = baseDir + '/accounts/' + nickname + '@' + domain + '.json'
    if not os.path.isfile(actorFilename):
        return False
    with open(actorFilename, 'r') as fp:
        actorJson = commentjson.load(fp)
    if not actorJson.get('skills'):
        actorJson['skills'] = {}
    if skillLevelPercent > 0:
        actorJson['skills'][skill] = skillLevelPercent
    elif skill in actorJson['skills']:
        # guard the delete: removing an absent skill previously raised
        # KeyError
        del actorJson['skills'][skill]
    with open(actorFilename, 'w') as fp:
        commentjson.dump(actorJson, fp, indent=4, sort_keys=False)
    return True
def test_dump_with_kwargs(self):
    """dump must forward keyword arguments (e.g. indent) to json.dump."""
    test_dict = dict(a=1, b=2)
    test_kwargs = dict(indent=4)
    with open(os.path.join(self.path, 'test.json'), 'w') as wfp:
        # dump returns None, so the old `c_dump = ...` binding was unused
        commentjson.dump(test_dict, wfp, **test_kwargs)
    j_dump = json.dumps(test_dict, **test_kwargs)
    with open(os.path.join(self.path, 'test.json'), 'r') as rfp:
        # the old `assert rfp.read(), j_dump` never compared the two
        # strings; compare file contents against json.dumps output
        assert rfp.read() == j_dump
def writeToFormattedJSON(filename, dictionary):
    '''Write a dictionary to JSON, but use proper readable formatting'''
    with open(filename, 'w') as handle:
        json.dump(dictionary, handle, indent=4, separators=(',', ': '))
def update_file(self):
    """Persist the current in-memory data back to the backing file."""
    with open(self.filename, 'w') as handle:
        commentjson.dump(self.data, handle)
def save_operations(operations, path, indent = 2):
    """Write *operations* as JSON to *path*, preserving key order."""
    with open(path, 'w+') as handle:
        return dump(operations, handle, indent = indent, sort_keys = False)
def mag_corr_loop(U_array, J_array, dJ_array, jobdef, jobdef_file, model, temp_modelfile, orb_type, number_decimals):
    """
    Function mag_corr_loop is designed to run over the U, J and dJ values
    to fill up the mag_corr dictionary.  (Python 2 module: note the
    print statements.)

    INPUTS            TYPE      DESCRIPTION
    U_array           nparray   All the U values in a numpy array.
    J_array           nparray   All the J values in a numpy array.
    dJ_array          nparray   All the dJ values in a numpy array.
    jobdef            dict      The dictionary that defines the tight
                                binding job to be run.
    jobdef_file       str       The name of the jobdef file.
    model             dict      The dictionary that contains the model
                                system for the tight binding job to be run.
    temp_modelfile    str       The name of the model file.
    orb_type          str       On-site orbital symmetry. either s, p or d.
    number_decimals   int       The number of decimal places to report
                                values of U, J and dJ.

    OUTPUTS           TYPE      DESCRIPTION
    SuccessFlag       bool      If all the tight binding simulations are
                                successful then this is returned as True,
                                if any are not successful the loop is
                                exited and this is returned as False.
    mag_corr          dict      A dictionary containing value of the
                                magnetic correlation at each value of
                                U, J and dJ.
    """
    # initialise the mag_corr dictionary
    mag_corr = {}
    SuccessFlag = True
    for U in U_array:
        for J in J_array:
            print "U = ", U, "\t J = ", J
            for dJ in dJ_array:
                # if J > U:
                #     mag_corr[U, J, dJ] = 0.0
                # else:
                model['species'][0]["U"] = round(U, number_decimals)
                if orb_type == "p":
                    model['species'][0]["I"] = round(J, number_decimals)
                elif orb_type == "d":
                    model['species'][0]["I"] = round(J, number_decimals)
                    model['species'][0]["dJ"] = round(dJ, number_decimals)
                # write out the new modelfile so TB.main() picks it up
                with open(temp_modelfile, 'w') as f:
                    commentjson.dump(model, f, sort_keys=True, indent=4, separators=(',', ': '))
                try:
                    SCFflag, mag_corr[round(U, number_decimals), round(J, number_decimals), round(dJ, number_decimals)] = TB.main()
                # if we end up with a linear algebra error then we can re-run with a different optimisation scheme.
                except numpy.linalg.linalg.LinAlgError:
                    # store original optimisation routine choice
                    old_routine = jobdef['optimisation_routine']
                    if old_routine == 1:
                        # then set it to routine 2
                        jobdef['optimisation_routine'] = 2
                    else:
                        # then set it to routine 1
                        jobdef['optimisation_routine'] = 1
                    # write jobdef back to file
                    with open(jobdef_file, 'w') as f:
                        commentjson.dump(jobdef, f, sort_keys=True, indent=4, separators=(',', ': '))
                    # and run again
                    print("SCF did not converge. Re-running simulation with different optimisation routine. ")
                    SCFflag, mag_corr[round(U, number_decimals), round(J, number_decimals), round(dJ, number_decimals)] = TB.main()
                    # reset optimisation routine
                    jobdef['optimisation_routine'] = old_routine
                    # write jobdef back to file
                    with open(jobdef_file, 'w') as f:
                        commentjson.dump(jobdef, f, sort_keys=True, indent=4, separators=(',', ': '))
                # If the SCF has converged then we can trust the result
                if SCFflag == True:
                    pass
                # If the SCF flag is False and this was an SCF calculation then rerun
                elif jobdef["scf_on"] == 1:
                    # Use a smaller value of A (divide by 10)
                    jobdef["A"] = jobdef["A"]/10.0
                    # Increase number of steps by a factor of 10
                    jobdef["scf_max_loops"] = int(jobdef["scf_max_loops"]*10)
                    # write jobdef back to file
                    with open(jobdef_file, 'w') as f:
                        commentjson.dump(jobdef, f, sort_keys=True, indent=4, separators=(',', ': '))
                    # and run again
                    print("SCF did not converge. Re-running simulation with smaller mixing value. ")
                    SCFflag, mag_corr[round(U, number_decimals), round(J, number_decimals), round(dJ, number_decimals)] = TB.main()
                    # Re-set the jobdef variables:
                    jobdef["A"] = jobdef["A"]*10.0
                    jobdef["scf_max_loops"] = int(jobdef["scf_max_loops"]/10)
                    with open(jobdef_file, 'w') as f:
                        commentjson.dump(jobdef, f, sort_keys=True, indent=4, separators=(',', ': '))
                # If that still hasn't worked, exit gracefully...
                if SCFflag == False:
                    SuccessFlag = False
                    print "SCF did not converge for U = ", round(U, number_decimals), "; J = ", round(J, number_decimals), "; dJ = ", round(dJ, number_decimals)
                    print "Exiting."
                    return SuccessFlag, mag_corr
    return SuccessFlag, mag_corr
def make_magmomcorr_graphs(numeperatom):
    """Sweep U, J (and dJ for d orbitals) for the given number of
    electrons per atom, computing the magnetic correlation at each point
    via mag_corr_loop, then plot the result.

    Temporarily rewrites JobDef.json and a temp model file; both are
    restored/cleaned up before returning.
    """
    Verbose = 1
    number_decimals = 6
    orb_type = "p"
    plotname = "../output_PyLATO/Mag_Corr_"+orb_type+"_"+str(numeperatom)
    # LaTeX label for the plotted operator expectation value
    op_sq_name="\\frac{1}{3} \langle :\hat{\mathbf{m}}_1.\hat{\mathbf{m}}_2:\\rangle"
    # sweep ranges for the Coulomb/exchange parameters
    U_min = 0.005
    U_max = 10
    U_num_steps = 100
    J_min = 0.005
    J_max = 5
    J_num_steps = 100
    dJ_min = 0
    dJ_max = 1
    dJ_num_steps = 1
    U_array, U_step = np.linspace(U_min, U_max, num=U_num_steps, retstep=True)
    # test
    U_array = np.append(U_array,U_max+U_step)
    # J and dJ only apply for p and d orbital symmetry
    if orb_type == "s":
        J_array = [0.0]
        J_step = 0.0
        dJ_array = [0.0]
        dJ_step = 0.0
    elif orb_type == "p":
        J_array, J_step = np.linspace(J_min, J_max, num=J_num_steps, retstep=True)
        # test
        J_array = np.append(J_array,J_max+J_step)
        dJ_array = [0.0]
        dJ_step = 0.0
    elif orb_type == "d":
        J_array, J_step = np.linspace(J_min, J_max, num=J_num_steps, retstep=True)
        dJ_array, dJ_step = np.linspace(dJ_min, dJ_max, num=dJ_num_steps, retstep=True)
        # test
        J_array = np.append(J_array,J_max+J_step)
        dJ_array = np.append(dJ_array,dJ_max+dJ_step)
    else:
        print("ERROR: orb_type must be 's', 'p' or 'd'. Exiting. ")
        sys.exit()
    jobdef_file = "JobDef.json"
    jobdef_backup = "JobDef_backup.json"
    # Make a backup of the JobDef file
    shutil.copyfile(jobdef_file, jobdef_backup)
    # Read in the JobDef file
    with open(jobdef_file, 'r') as f:
        jobdef = commentjson.loads(f.read())
    # Read in the model file
    modelfile = "models/TBcanonical_"+orb_type+".json"
    model_temp = "TBcanonical_"+orb_type+"_temp"
    temp_modelfile = "models/"+model_temp+".json"
    with open(modelfile, 'r') as f:
        model = commentjson.loads(f.read())
    # Copy and paste the regular python model to one with the same temp name
    model_python = "models/TBcanonical_"+orb_type+".py"
    model_python_temp = "models/"+model_temp+".py"
    shutil.copyfile(model_python, model_python_temp)
    # make sure that the number of electrons is correct.
    model['species'][0]["NElectrons"] = numeperatom
    # change the model and Hamiltonian in jobdef
    jobdef["Hamiltonian"] = orb_type+"case"
    jobdef["model"] = model_temp
    # make sure that the scf is on
    jobdef["scf_on"] = 1
    # write jobdef back to file
    with open(jobdef_file, 'w') as f:
        commentjson.dump(jobdef, f, sort_keys=True, indent=4, separators=(',', ': '))
    #pdb.set_trace()
    magFlag, mag_corr = mag_corr_loop(U_array, J_array, dJ_array, jobdef, jobdef_file, model, temp_modelfile, orb_type, number_decimals)
    # clean up temp files (including the compiled .pyc of the temp model)
    os.remove(temp_modelfile)
    os.remove(model_python_temp)
    os.remove(model_python_temp+"c")
    # restore backup of JobDef.json
    shutil.copyfile(jobdef_backup, jobdef_file)
    os.remove(jobdef_backup)
    # Make the plot if the mag_corr_loop was successful
    if magFlag == True:
        Plot_OpSq_U_J(Verbose,mag_corr,orb_type,plotname,U_min,U_step,U_num_steps,J_min,J_step,J_num_steps,dJ_min,dJ_step,dJ_num_steps,op_sq_name, number_decimals)
    else:
        print("Simulation failed.")
def write_model(model, model_name):
    """Serialize *model* to models/<model_name>.json."""
    target = "models/{}.json".format(model_name)
    with open(target, 'w') as handle:
        commentjson.dump(model, handle)