def retrospective(acct):
    """Build retrospective plot data for the subaccounts of *acct*.

    Queries ledger for the subaccounts of *acct*, drops any that match an
    entry in exclude['retrospective'], and for each remaining subaccount
    with enough weekly activity over the last 12 months writes a plot file
    under ./build/.

    Returns a list of dicts with keys 'name' (subaccount) and 'plotfile'
    (basename of the generated plot file).
    """
    # FIX: removed dead local `rtnstring` (assigned but never used).
    subaccts = []
    output = runledger(commands['retrospective'] + ["^" + acct])
    for line in output.split('\n'):
        # Keep only the text after the account prefix; empty splits drop out.
        subaccts += line.split(acct)[-1:]
    # Remove the starting ":" from the subaccount name
    subaccts = [subacct[1:] for subacct in subaccts]
    # Determine which accounts are excluded before iterating.
    # FIX: `ex in s` replaces the non-idiomatic `s.find(ex) != -1`.
    excluded = [subacct for ex in exclude['retrospective'] for subacct in subaccts
                if ex in unicode(acct + ":" + subacct)]
    subaccts = [subacct for subacct in subaccts if subacct not in excluded]
    retro = []
    for subacct in subaccts:
        fullname = acct + ":" + subacct
        # Print retrospective of subaccts with at least 7 transactions when
        # viewed weekly over the last 12 months (fewer than 6 register lines
        # means too little activity to plot).
        output = runledger(commands['last12months'] + ['-J', 'register'] + ["^" + fullname])
        if len(output.split('\n')) < 6:
            continue
        safename = safe_name(fullname)
        plot.main("./build/" + safename + "retro",
                  commands['last12months'] + ['-J', 'register'] + ["^" + fullname] + ["-f", LEDGER_FILE])
        retro.append({'name': subacct, 'plotfile': safename + "retro"})
    return retro
def forecast(acct):
    """Generate forecast plots for the budgeted subaccounts of *acct*.

    Queries ledger for budgeted subaccounts, drops any matching an entry in
    exclude['forecast'], and writes a 12-month forecast plot file under
    ./build/ for each remaining subaccount.

    Returns an empty list (see NOTE below).
    """
    # FIX: removed dead local `rtnstring` (assigned but never used).
    # identify budgeted subaccts
    subaccts = []
    output = runledger(commands['acctbudget'] + ["^" + acct])
    for line in output.split('\n'):
        subaccts += line.split(acct)[-1:]
    # Remove the starting ":" from the subaccount name
    subaccts = [subacct[1:] for subacct in subaccts]
    # Determine which accounts are excluded before iterating.
    # FIX: `ex in s` replaces the non-idiomatic `s.find(ex) != -1`.
    excluded = [subacct for ex in exclude['forecast'] for subacct in subaccts
                if ex in unicode(acct + ":" + subacct)]
    subaccts = [subacct for subacct in subaccts if subacct not in excluded]
    forecast = []
    for subacct in subaccts:
        fullname = acct + ":" + subacct
        safename = safe_name(fullname)
        # FIX: renamed `file` -> `outfile` (shadowed the builtin).
        outfile = "./build/" + safename + "forecast"
        args = commands['next12months'] + ['-J', 'register'] + ["^" + fullname] + ["-f", LEDGER_FILE]
        plot.main(outfile, args)
        # NOTE(review): the append below was already commented out in the
        # original, so this function always returns [] — confirm intent
        # before re-enabling.
        # forecast.append({'name': subacct, 'plotfile': safename + "forecast"})
    return forecast
def main(args):
    """Main running script.

    Creates the output directory tree from the config, then runs each
    pipeline stage (clean, download, train, predict, qaqc, plot) gated on
    the corresponding boolean in *args*.
    """
    # Get the config file
    config = util.get_config(args.config)
    root_dir = config['ROOT_DIR']

    # Fill out initial folders.
    # FIX: collapsed six duplicated mkdir stanzas into one data-driven loop.
    initial_dirs = [
        ('metadata', '{}/metadata'.format(root_dir)),
        ('OBS', config['OBS_ROOT']),
        ('ESTIMATORS', config['ESTIMATORS_ROOT']),
        ('PREDICTIONS', config['PREDICTIONS_ROOT']),
        ('QAQC', config['QAQC_ROOT']),
        ('PLOT', config['PLOT_ROOT']),
    ]
    for label, path in initial_dirs:
        if not os.path.isdir(path):
            os.mkdir(path)
            print('created {} dir'.format(label))

    # --- clean database ---
    # FIX: section comment previously said 'download data' (copy-paste).
    if args.clean:
        clean.main(config)
    else:
        print('skipping database cleaning')

    # --- download data ---
    if args.download:
        download.main(config)
    else:
        print('skipping download of new data')

    # --- train models ---
    if args.train:
        train.main(config)
    else:
        print('skip training')

    # --- make predictions ---
    if args.predict:
        predict.main(config)
    else:
        # FIX: message previously read 'skipping download of new data'
        # (copy-paste from the download stage).
        print('skipping predictions')

    # --- run qaqc checks ---
    if args.qaqc:
        qaqc.main(config)
    else:
        print('skipping qaqc')

    # --- plot ---
    if args.plot:
        plot.main(config)
    else:
        print('skipping plots')
def main():
    """Parse command-line arguments and dispatch to the requested mode."""
    parser = argparse.ArgumentParser()
    parser.add_argument('mode', choices=['train', 'plot', 'eval', 'hogwild'])
    # IO arguments.
    parser.add_argument('--name', type=str, default='text8.10k', help='name for model')
    parser.add_argument('--vocab-dir', type=str, default='data/vocab', help='input path for vocabulary')
    parser.add_argument('--matrix-dir', type=str, default='data/cooccur', help='input path for cooccurence matrix')
    # FIX: corrected 'ouput' -> 'output' typo in help text.
    parser.add_argument('--out-dir', type=str, default='vec', help='output directory to write vectors')
    parser.add_argument('--model-dir', type=str, default='models', help='directory to save model')
    parser.add_argument('--log-dir', type=str, default='log', help='directory log losses')
    parser.add_argument('--vec-path', type=str, default='', help='path to load vectors for plotting')
    parser.add_argument('--gensim-format', action='store_true', help='save vectors in gensim format')
    # Model arguments.
    parser.add_argument('--emb-dim', type=int, default=50, help='dimension of vectors')
    # Train arguments.
    parser.add_argument('--num-updates', type=int, default=10000, help='number of parameter updates')
    parser.add_argument('--batch-size', type=int, default=512, help='size of minibatches')
    parser.add_argument('--lr', type=float, default=1e-2, help='learning rate')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--use-schedule', action='store_true', help='using scheduler for optimizer')
    parser.add_argument('--save-every', type=int, default=1000, help='how often to save the model parameters')
    parser.add_argument('--print-every', type=int, default=100, help='how often to print loss to screen')
    # Plot arguments.
    parser.add_argument('--tsne', action='store_true', help='plot tsne')
    parser.add_argument('--matrices', action='store_true', help='plot matrices and decomposition')
    args = parser.parse_args()
    # FIX: modes are mutually exclusive, so use an elif chain instead of
    # re-testing args.mode with independent ifs.
    if args.mode == 'train':
        train.main(args)
    elif args.mode == 'plot':
        plot.main(args)
    elif args.mode == 'hogwild':
        hogwild.main(args)
    # NOTE(review): 'eval' is an accepted mode choice but has no dispatch
    # branch here — confirm whether eval handling lives elsewhere or is
    # missing.
def refreshPlot(f0, f1, c0, c1):
    """Clear both figures, re-plot the newest data file, and redraw the canvases.

    Changes the process working directory to the fixed data directory, picks
    the file with the latest creation time, and hands it to plot.main.
    """
    f0.clf()
    f1.clf()
    data_dir = "/home/rba/Downloads/RBA-DAQ_multisensor/trunk"
    os.chdir(data_dir)
    # Sort by creation time so the last entry is the most recent file.
    by_ctime = sorted(os.listdir(os.getcwd()), key=os.path.getctime)
    latest = by_ctime[-1]
    plot.main(data_dir + "/" + latest, f0, f1)
    canvas0.show()
    canvas1.show()
def getTuneView(self):
    """Show plots for the current listbox selection.

    One selected entry opens a single plot; two selected entries open a
    comparison plot. Any other selection count does nothing.
    """
    global lb1
    selection = lb1.curselection()
    n_selected = len(selection)
    if n_selected == 1:
        plot.main(lb1.get(selection))
    elif n_selected == 2:
        first_dir = lb1.get(selection[0])
        second_dir = lb1.get(selection[1])
        plotTunesCompare.main(first_dir, second_dir)
def main(): Createfolder = CreateFolders(absdir) # Createfolder.create_folder(absdir) Createfolder.__init__(absdir) if deleteOldData == "on": Createfolder.deleteOldData(absdir) # This script plots the cpptraj created data-files import qsub_hpc # If qsub is specified the ccptraj, R and plot scripts will be submitted to the hpc-queue. import cpptraj_analysis # This script runs the cpptraj module for merging mdcrd files and making the analysis on the combined dcd file. The analysis includes rmsd, distance and clustering import R_analysis # This script runs the "bio3d" package of "R" - which makes the principal component analysis import plot import CombinedPlots import MMPBSA_analysis if qsub == None: CPPTRAJ = cpptraj_analysis.main() if R_Analysis == "on": RPlot = R_analysis.main() if MMPBSA == "on": mmpbsa = MMPBSA_analysis.main() if makePlots == "on": Plot = plot.main() if makeHistPlots == "on": HistPlot = CombinedPlots.main() else: submit = qsub_hpc.main()
def main():
    """Interactive driver for the word2vec pipeline.

    Prompts for a project name and whether the source is code, then runs
    merge, tokenize/replace, model, and result stages, optionally plotting
    at the end.
    """
    print("Start word2vec")
    print("Input source name")
    source_name = input()
    merge_source.sub(source_name)
    print("is code? [y/n]")
    answer = input()
    # Code sources go through token replacement; prose goes through MeCab.
    if answer == "y":
        replace.sub(source_name)
    else:
        mecab.sub(source_name)
    model.sub(source_name)
    result.sub(source_name)
    print("plot? [y]")
    answer = input()
    if answer == "y":
        plot.main(source_name)
def main():
    """Drive rig2py playback: extend sys.path, register callbacks, read the
    input JSON, then plot the .txt files produced by the frame callback."""
    # Process the command line arguments
    args = processCommandLine(sys.argv)
    module_paths = args["modulePaths"]
    if module_paths != "":
        for module_path in module_paths.split(';'):
            sys.path.append(module_path)

    # Imported here because the search path may have just been extended.
    import rig2py
    import rig2pyHelper
    import plot

    # Create a rest pose
    global restPose
    restPose = rig2pyHelper.createRestPose()
    print("Joint order: " + str(rig2pyHelper.jointOrder))

    # Set our callbacks
    rig2py.errorCallback(onError)
    rig2py.frameCallback(onFrame)

    # Load all data from the file (will invoke the callback many times)
    try:
        rig2py.read(args["inputJson"])
    except RuntimeError as e:
        print(e)
        return

    # Plot all the txt files we created
    # TODO: use the frame number instead of assuming zero
    txt_files = [name for name in os.listdir("./") if name.endswith(".txt")]
    plot.main(txt_files)
def main(exp_name, x_name, args, params):
    """Run the experiment sweep for parameter *x_name* over *params*.

    Writes args.json and the result .npy arrays into the *exp_name*
    directory, then produces loss (and optionally costs / percentage) plots.
    Exits immediately when args.debug is set.
    """
    print(f"Running exp with param {x_name}", params)
    if args.debug != '':
        exit()
    title = "loss"
    os.makedirs(exp_name, exist_ok=True)
    with open(f'{exp_name}/args.json', 'w') as f:
        # FIX: renamed local `dict` -> `args_dict` (shadowed the builtin).
        args_dict = args.__dict__
        args_dict["changed_parameter"] = x_name
        args_dict["changed_parameter_values"] = params
        json.dump(args_dict, f, indent=4)
    g = graph_utils.create_fc_graph(args.h, args.w)
    edges_num_dict = graph_utils.numerate_edges(g)
    start_node = 0
    finish_node = list(g.nodes)[-1]
    solutions_hoef, solutions_dro, solutions_dro_cropped, c_worst_hoef, c_worst_dro, c_bar = run_experiments(
        args, g, edges_num_dict, start_node, finish_node, x_name, params)
    print(f"Finished exp, {x_name}")
    np.save(f'{exp_name}/c_worst_dro.npy', c_worst_dro)
    np.save(f'{exp_name}/c_worst_hoef.npy', c_worst_hoef)
    np.save(f'{exp_name}/c_bar.npy', c_bar)
    np.save(f'{exp_name}/solutions_hoef.npy', solutions_hoef)
    np.save(f'{exp_name}/solutions_dro.npy', solutions_dro)
    np.save(f'{exp_name}/solutions_dro_cropped.npy', solutions_dro_cropped)
    np.save(f'{exp_name}/params.npy', params)
    count_costs = args.costs == 'true'
    count_percentage = args.percentage_mode == 'true'
    plot.main(exp_name, x_name, title, args)
    if count_costs:
        plot.main(exp_name, x_name, title.replace("loss", "costs"), args, count_costs=count_costs)
    if count_percentage:
        plot.main(exp_name, x_name, title + ' percentage', args, count_percentage=count_percentage)
# NOTE(review): the head of the enclosing function (its `def` line, the
# `try:` and the file-open) is outside the visible chunk; indentation of
# the fragment below is reconstructed — verify against the full file.
        data = json.load(file)[1]
        return data
    except FileNotFoundError:
        print("Data file not found.")
        exit()


def get_values(data):
    """Get a list of all first chromosomes' objective values."""
    values = [population[0]["fitness"] for population in data]
    return values


def get_new_values(values):
    """Record any changes higher. Its size is the same as its argument's."""
    # NOTE(review): `new_value` is never updated inside the loop, so every
    # element is compared against values[0] only, and indices are printed
    # rather than recorded — looks at odds with the docstring; confirm.
    new_value = values[0]
    for index, value in enumerate(values):
        if value < new_value:
            print(index)


if __name__ == "__main__":
    # get_new_values(get_values(get_data()))
    things = [thing["fitness"] for thing in get_data()[2]]
    # for thing in things:
    #     thing["genes"] = 0
    plot.main(things)
    # things = get_data()[29][0]
    # print(beautifier(things))
import plot
import train
import util
from config import Config
from logger import Logger


# Main entry point: plot results, train the network, or plot a single
# output image depending on the parsed arguments.
if __name__ == '__main__':
    args = Config.getArgs()
    util.printHeader()
    if args.plot:
        plot.main()
    elif args.image is None:
        Logger.log("Going to train network!")
        train.main()
    else:
        plot.plotOutput()
def main(dataset, expert_cost=100, rloss=10, total_cost=100000, runs=1, y_len=2500, suf=""):
    """Run the active-learning money experiments for *dataset* and plot.

    Each strategy's result is pickled into exp_<dataset><suf>/ as
    <strategy>.pkl, then plot.main renders the directory.

    All numeric parameters are coerced with int() so the function also
    accepts string values (e.g. from the command line).
    """
    expert_cost = int(expert_cost)
    rloss = int(rloss)
    total_cost = int(total_cost)
    runs = int(runs)
    directory = 'exp_' + dataset + suf + '/'
    # NOTE(review): intentionally raises if the directory already exists,
    # which protects earlier results — confirm before adding exist_ok=True.
    os.makedirs(directory)
    logging.basicConfig(filename=directory + '/log.log', level=logging.DEBUG)
    start.main(dataset)
    budget = (total_cost, 1, expert_cost)
    # FIX: collapsed four copy-pasted stanzas into one loop over the
    # strategy names. The 'dec5' strategy was commented out in the original
    # and remains disabled here.
    for stra in ('je', 'jc', 'cde', 'dec5_ip'):
        (res, adata) = active.experi_money(start.mat, start.rel, start.turk_data,
                                           start.turk_data_uncer, runs, budget,
                                           stra=stra, rloss=rloss)
        save_disk(directory, stra + '.pkl', res)
    # PLOT
    plot.main(directory, y_len)
# NOTE(review): this chunk starts mid-function (the `def` line and the loop
# over name pairs are outside the visible chunk) and the trailing __main__
# block continues past it; indentation is reconstructed — verify against
# the full file.
        # Canonicalize the pair into a single underscore-joined name.
        if b1 < b2:
            s = b1 + '_' + b2
        else:
            s = b2 + '_' + b1
        # print(s)
        new_topos.append(s)
    # Keep only topology names not already known.
    result = []
    for nt in new_topos:
        if nt not in topo_names:
            result.append(nt)
    return result


if __name__ == '__main__':
    in_path = "/Users/rafalmaselek/Projects/CheckMateCalculations/results/fastlim/FASTLIM_OUT"
    slha_path = "/Users/rafalmaselek/Projects/CheckMateCalculations/results/fastlim/SLHA_FIX"
    points = mplt.main(in_path, slha_path)[0]
    procs = list(chain.from_iterable([p.procs for p in points]))
    topo = mplt.parse_topo('topologies.txt')
    # analyze processes to extract particles
    for p in procs + topo:
        p.analyze_process(None, True)
        if not validate_topo(p):
            cmssm.drawFullTree(p.decayTree)
            # print('#'*40)
    procs = list(set(procs))
    topo = set(topo)
    print('Looking for new topologies...')
    new_topos = []
def test_one(self):
    """Smoke test: plot.main() runs without raising."""
    plot.main()
# NOTE(review): this chunk starts mid-script — the creation of the argument
# parser `p` (and any imports) is outside the visible chunk; verify against
# the full file.
p.add_argument("cities", nargs="*", help="list of cities to plot")
p.add_argument("--destructive", "--delete", "-d", action="store_true", dest="destructive", help="delete files after plotting them")
p.add_argument("--no-pressure", "-np", dest="pressure", action="store_false", help="do not plot pressure data")
p.add_argument("--no-humidity", "-nh", dest="humidity", action="store_false", help="do not plot humidity data")
args = p.parse_args()
# Temperature categories are always plotted; pressure/humidity are opt-out.
categories = ["temp", "feels_like"]
if args.pressure:
    categories.append("pressure")
if args.humidity:
    categories.append("humidity")
load.get_weather(args.cities)
plot.main(categories)
# Optionally clean up the downloaded data files after plotting.
if args.destructive:
    for f in os.listdir("data/"):
        os.remove("data/" + f)