def main():
    # Entry point: simulate satellites photographing targets over a fixed
    # number of turns, then write the resulting collection to output.txt.
    filename_input = "datasets/forever_alone.in"
    number_turns, sat_list, collection, photo_list = read_file(filename_input)

    # Precompute, per satellite, the list of photos reachable from its
    # position at every turn: sat_dict[id][t] -> photos in frame at turn t.
    sat_dict = {}
    for i in range(len(sat_list)):
        my_id = sat_list[i].id
        sat_dict[my_id] = []
        positions = sat_list[i].initList(number_turns)
        for k in tqdm(range(len(positions))):
            pos = positions[k]
            sat_dict[my_id] += [
                list_photos_in_range(sat_list[i], pos, photo_list, k)
            ]

    # Simulation: each turn, each satellite takes at most one reachable photo.
    for t in tqdm(range(number_turns)):
        for sat_id in sat_dict.keys():
            # NOTE(review): sat_id is used both as a dict key and as an index
            # into sat_list — this assumes satellite ids are exactly
            # 0..len(sat_list)-1; confirm against read_file.
            photo_in_frame = sat_dict[sat_id][t]
            index_of_pic_taken = -1
            for q in range(len(photo_in_frame)):
                photo = photo_in_frame[q]
                if sat_list[sat_id].isInRange(photo[1]):
                    sat_list[sat_id].track(photo[1])
                    # photo[0] appears to be metadata; items from index 1 on
                    # are the objects that record the shot.
                    for p in range(1, len(photo)):
                        photo[p].instagram(t, sat_id)
                    index_of_pic_taken = q
                    break
            # Drop the photo that was taken so it is not retried later.
            if index_of_pic_taken >= 0:
                del photo_in_frame[index_of_pic_taken]
            sat_list[sat_id].updatePosition(t)
    write_file("output.txt", collection)
def train(train_file, output_path):
    """Train every model registered in MODELS and serialize each one.

    Parameters
    ----------
    train_file : path to the training dataset, readable by ``read_file``.
    output_path : directory where each fitted model is dumped under its name.
    """
    logger.info("Start training %d models...", len(MODELS))
    train = read_file(train_file)
    X, y = train.drop(columns=[TARGET_NAME]), train[TARGET_NAME]
    output_pathobj = Path(output_path)
    for name, model in MODELS.items():
        logger.debug("Training model %s", name)
        model.fit(X, y)
        # model.score() returns a float (e.g. accuracy or R^2); the original
        # "%d" silently truncated it to an integer in the log output.
        logger.info("Score for model %s: %f", name, model.score(X, y))
        dump(model, output_pathobj / name)
def run_sde(n_runs=100, n_steps=100, X0=0.0, stop_time=1.0, drift=lambda t: 0.0):
    """Run the external SDE simulator and collect its output files.

    Samples *drift* on a uniform time grid, writes it to ``dir + 'drift'``,
    invokes the compiled ``./a.out`` binary, then reads the result files.

    Parameters: n_runs/n_steps — simulation size; X0 — initial value;
    stop_time — end of the time interval; drift — callable t -> drift value.
    Returns a dict with the time grid, trajectories and moment estimates.
    """
    time = (arange(n_steps) / float(n_steps)) * stop_time
    drift = array([drift(t) for t in time])
    # 'with' guarantees the file is closed even if a write fails; the
    # original left it open on error (and 'w+' needlessly requested reads).
    with open(dir + 'drift', 'w') as fl:
        fl.writelines(str(d) + '\n' for d in drift)
    os.system(
        "./a.out " + str(n_runs) + " " + str(n_steps) + " "
        + str(X0) + " " + str(stop_time) + " " + "drift"
    )
    return {
        'time': time,
        'X': array(read_file(dir + "X")),
        'M': array(read_file(dir + "M")),
        'drift': drift,
        'PX': array(read_file(dir + "PX")),
        'QX': array(read_file(dir + "QX")),
        'VX': array(read_file(dir + "VX")),
    }
def split(input_file, output_filepath):
    """
    Splits the `input_file` into a train set and a test set that are
    written to `output_filepath`.
    """
    logger.info("Splitting into train and test set...")
    destination = Path(output_filepath)
    dataset = read_file(input_file)
    logger.info("Dataset has %d lines.", len(dataset.index))
    train, test = train_test_split(dataset)
    logger.info("Train set has %d lines.", len(train.index))
    logger.info("Test set has %d lines.", len(test.index))
    # Write both partitions next to each other in the output directory.
    for part, filename in ((train, "train.csv"), (test, "test.csv")):
        to_file(part, destination / filename)
def evaluate(test_file, model_path, output_file):
    """Evaluate every trained model against the test set.

    Loads each model named in METRICS from *model_path*, computes all of
    its metrics on *test_file*, and writes the per-model results to
    *output_file* as JSON.
    """
    logger.info("Start evaluating %d models...", len(METRICS))
    test = read_file(test_file)
    X, y = test.drop(columns=[TARGET_NAME]), test[TARGET_NAME]
    model_pathobj = Path(model_path)
    metrics_per_model = {}
    for name, metrics in METRICS.items():
        logger.debug("Evaluating model %s", name)
        model = load(model_pathobj / name)
        y_pred = model.predict(X)
        metric_results = {metric.__name__: metric(y, y_pred) for metric in metrics}
        logger.info("Metrics for model %s: %s", name, metric_results)
        metrics_per_model[name] = metric_results
    # "w" truncates and writes; the "+" (read) flag was unnecessary here.
    with open(output_file, "w") as f:
        json.dump(metrics_per_model, f, indent=2)
def main(email, password, book):
    # Interactive entry point: authenticate against Goodreads, then add
    # either one named book or every title listed in a file to the shelf.
    session = requests.Session()
    choice = main_menu()
    if email is None:
        email = click.prompt('Goodreads email ')
    if password is None:
        password = click.prompt('Goodreads password ', hide_input=True)
    token = login(session, email, password)
    if choice == 'Enter a Book Name':
        add_book_to_shelf(session, token, get_book_id(book))
    elif choice == 'Upload a file with Book Names':
        for title in read_file():
            add_book_to_shelf(session, token, get_book_id(title))
def funcion_diego(sig):
    # Return the chi^2 difference between the signal+background fit
    # (mu fixed to 1) and the background-only fit (mu fixed to 0).
    params, ary_s, ary_b, ary_d, cinv = read_file(sig)
    manager = BinFitBox(params, ary_s, ary_b, ary_d, cinv)
    # Signal+background hypothesis: signal strength mu held constant at 1.
    manager.Params["mu"].setVal(1)
    manager.Params["mu"].constant = True
    manager.createFit()
    manager.fit.migrad()
    #manager.fit.hesse()
    #manager.fit.minos()
    chi2sb = manager.fit.get_fmin()['fval']
    # Background-only hypothesis: mu reset to 0 (still constant), refit.
    manager.Params["mu"].setVal(0)
    manager.createFit()
    manager.fit.migrad()
    chi2b = manager.fit.get_fmin()['fval']
    chi_new = chi2sb - chi2b
    return chi_new
#################################################################################################### # 2015A7PS0116P # Abhishek V Joshi #################################################################################################### from Tkinter import * import ttk from read_file import * from bayes_net_definitions import * import copy content = read_file() bn = BayesNet(content) variables = copy.deepcopy(bn.variables) def display_result(): values = [[item.get() for item in i] for i in var] query_pos = [ variables[iterator] for iterator, value in enumerate(values[0]) if value > 0 ] query_neg = [ '~' + variables[iterator] for iterator, value in enumerate(values[1]) if value > 0 ] queries = query_pos + query_neg evi_pos = [ variables[iterator] for iterator, value in enumerate(values[2]) if value > 0
from read_file import *
from new_user import *

# Build a NewUsers object for every name listed in the names file.
list_of_names = read_file('names.txt')
list_of_object_names = [NewUsers(name) for name in list_of_names]

# Guard against an empty names file: the original unconditionally
# indexed [0], which raised IndexError when no names were read.
if list_of_object_names:
    print(list_of_object_names[0].user_info())
from read_file import *
import matplotlib.pyplot as plt
from myPegasos import myPegasos
from mySoftplus import mySoftplus
from myPegasos import featureNormalize


def draw_plot(axis, k):
    """Run mySoftplus five times with mini-batch size k and plot each loss curve."""
    for _ in range(5):
        model = mySoftplus(X, y, 1e-4, k)
        axis.plot(model.lossf)


X, y = read_file('MNIST-13.csv')
X = featureNormalize(X)

f, axarr = plt.subplots(3, 2, figsize=(8, 8))
# One subplot per mini-batch size; drive the titles and plots from one
# table instead of repeating the set_title/draw_plot pair five times.
subplot_ks = [((0, 0), 1), ((0, 1), 20), ((1, 0), 200),
              ((1, 1), 1000), ((2, 0), 2000)]
for (row, col), k in subplot_ks:
    axarr[row, col].set_title('k = %d' % k)
    draw_plot(axarr[row, col], k)
plt.tight_layout()
# plt.savefig('../tex/figure/sgd.pdf', transparent=True, dpi=600)
plt.show()
# Read the season stats once instead of re-reading the CSV on every loop
# iteration (the file does not change between players).
df = pd.read_csv(stats_file)
for p in players_ratings:
    if p in df['Player'].values:
        # Filter for the player's row once and pull all stats from it.
        row = df[df.Player == p].iloc[0]
        stats = [float(row[col]) for col in
                 ('PTS', 'FG', 'FGA', 'FT', 'FTA', 'ORB',
                  'DRB', 'STL', 'AST', 'BLK', 'TOV')]
        player = Player(p, players_ratings[p][0], *stats)
    else:
        # NOTE(review): this branch passes 13 zeroed stats while the branch
        # above passes 11 — preserved as-is; confirm Player's signature.
        player = Player(p, players_ratings[p][0],
                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    players.append(player)

# The original initialized season_games to [] and immediately overwrote it.
season_games = read_file(season_file)
features = get_features(season_games, teams, players)
write_features_file(features, features_file)
from pylab import *
sys.path.append("/workspace/mathlib/tools/python/")
from read_file import *

# Output directory of the SDE test run.
dir = "/workspace/output/SDE/test/"

# Load the time grid and the QX series, then plot one against the other.
time = read_file(dir + "time")
QX = read_file(dir + "QX")
plot(time, QX)
show()
def word_occurrences_dict(text):
    """Return a mapping of each word found in *text* to its occurrence count."""
    return word_count(words(read_file(text)))
from pylab import *
sys.path.append("/workspace/mathlib/tools/python/")
from read_file import *

dir = "/workspace/output/SDE/test/"

# Load the simulated trajectories and the derived series.
X = read_file(dir + "X")
M = read_file(dir + "M")
time = read_file(dir + "time")
Xm = read_file(dir + "X_mean")
Y = [t * t / 2 for t in time]
XM = read_file(dir + "XM")
QX = read_file(dir + "QX")

# Dead code kept for reference: plot several raw trajectories against
# the mean and the quadratic curve.
# for k in range(4):
#     plot(time, X[k])
# plot(time, Xm)
# plot(time, Y)
# show()

# Plot the first trajectory together with QX and the empirical mean.
for k in range(1):
    plot(time, X[k])
plot(time, QX)
plot(time, Xm)
from pylab import *
sys.path.append("/workspace/mathlib/tools/python/")
from read_file import *

dir = "/workspace/output/SDE/project1/hitting_distribution/"

# The first row of each result file holds the hitting distribution.
# NOTE(review): dist_nodrift_nochange reads "distribution_drift_nochange" —
# the variable name and the file name disagree; confirm which is intended.
dist = read_file(dir + "distribution")[0]
dist_nodrift = read_file(dir + "distribution_nodrift")[0]
dist_nodrift_nochange = read_file(dir + "distribution_drift_nochange")[0]

# Overlay the three distributions with distinct marker colours.
for series, style in ((dist, 'ro'), (dist_nodrift, 'bo'),
                      (dist_nodrift_nochange, 'go')):
    plot(series, style)
show()
# The original `from 6_filesInPython...` import is a SyntaxError: a Python
# module path component may not start with a digit. importlib can still
# load such a package by its string name.
import importlib

_file_operations = importlib.import_module(
    "6_filesInPython.6_importingOurOwnFile.file_operations")
save_to_file = _file_operations.save_to_file
read_file = _file_operations.read_file

save_to_file('Rakib', 'data.txt')
# NOTE(review): the write targets 'data.txt' but the read targets
# 'demo.txt' — confirm whether this mismatch is intentional.
print(read_file('demo.txt'))
from read_file import *

# Invoke read_file on the target script; the return value is discarded.
target_path = '/1355557253115928/1.py'
read_file(target_path)
from pylab import *
sys.path.append("/workspace/mathlib/tools/python/")
from read_file import *

dir = "/workspace/output/SDE/test/"

# Load the sample paths, the time grid, and the empirical mean.
X = read_file(dir + "X")
time = read_file(dir + "time")
Xm = read_file(dir + "X_mean")

# Overlay the first 20 sample paths, then the mean on top of them.
for idx in range(20):
    plot(time, X[idx])
plot(time, Xm)
show()
parser.add_argument("--softening",type=int,default=0, help="softening constant") # parser.add_argument("-o","--outfile",type=argparse.FileType('w'), # help="save results to file, '-' is stdout") parser.add_argument("--energy",action="store_const",const=Energy(), help="calculate and plot the energy of the system") parser.add_argument("--period",type=int,metavar="i",nargs="?",const=1, help="calculate and output the orbtal period of body i") parser.add_argument("--maxpoints",type=int, help="maximum plotted line length in data points") parser.add_argument("file",nargs="+", help="files containing the mass," " initial position and velocity of the bodies") args = parser.parse_args() m,x,v,names,styles = read_file(args.file) # change the position and velocity of the Sun so that # the centre of mass is stationary and at the origin x[0] = -numpy.sum(x[1:]*m[1:,numpy.newaxis],axis=0)/m[0] v[0] = -numpy.sum(v[1:]*m[1:,numpy.newaxis],axis=0)/m[0] solar_system = N_bodies(m,x,v,args.dt,args.softening) v = solar_system.v_correction() dt = args.dt t = args.time pr = None # pr = printer() # for solar velocity print out if args.graph: plot = Plotter(dict(zip(names,styles)),x,axis=[-9e11,9e11,-9e11,9e11,-9e11,9e11],