def submit():
    """Build a Submission from the module-level grader config and submit it.

    Relies on the module globals ``homework``, ``part_names``, ``srcs`` and
    ``output`` (defined elsewhere in this file). Any failure is reported to
    stdout instead of crashing the grader script.
    """
    s = Submission(homework, part_names, srcs, output)
    try:
        s.submit()
    except Exception as ex:
        # FIX: corrected the misspelled user-facing message
        # ("occured" -> "occurred", "Messsage" -> "Message").
        template = "An exception of type {0} occurred. Message:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
def _getSubmissions(self):
    """Scan the current directory and map each student's netid to its Submission.

    Only directories are considered submissions; plain files are ignored.
    """
    result = {}
    for entry in os.listdir('./'):
        # Plain files in the working directory are not submissions.
        if os.path.isdir(entry):
            submission = Submission(entry)
            result[submission.netid] = submission
    return result
def submissions():
    """Render the 100 most recent submissions, newest first."""
    submissionsDatabase = app.mongo.db.submissions
    problemsDatabase = app.mongo.db.problems
    print(submissionsDatabase)
    # Cap at 100 documents, sorted by submission time descending.
    cursor = submissionsDatabase.find({}).limit(100).sort([('Submission Time', -1)])
    submissions = [Submission(doc, problemsDatabase) for doc in cursor]
    for entry in submissions:
        print(entry.submissionTime)
    return app.render_template('submissions.html', submissions=submissions)
def viewOneSubmissions(submissionId, form):
    """Render the code viewer page for a single submission.

    submissionId -- value matched against the "Submission Id" field in mongo.
    form -- request form; an optional "themes" entry selects the highlight theme.
    """
    # FIX: use a context manager so the file is closed even on error, and
    # strip only the trailing newline -- the old ``line[:-1]`` ate the last
    # character of the final line when the file had no trailing newline.
    with open("static/css/styles/styles.txt", "r") as file:
        themes = [line.rstrip("\n") for line in file]
    # FIX: ``is None`` instead of ``!= None``; fall back to the default theme.
    preferedTheme = form.get("themes")
    if preferedTheme is None:
        preferedTheme = "atom-one-dark"
    submissionsDatabase = app.mongo.db.submissions
    submission = Submission(
        submissionsDatabase.find({"Submission Id": submissionId})[0],
        app.mongo.db.problems)
    language = str(submission.language).lower()
    return app.render_template('submitted_Code_viewer.html',
                               submission=submission,
                               language=language,
                               themes=themes,
                               preferedTheme=preferedTheme)
def __init__(self, welcome=False, **kwargs):
    """Set up the main screen; optionally greet the user with a popup first."""
    super(MainScreen, self).__init__(**kwargs)
    self.orientation = 'vertical'
    # Start on the first exercise of the course.
    self.current_ex = 'ex1'
    self.current_file = 'warmUpExercise.py'
    self.submit_ob = Submission()
    self.element = resourceHandler()
    if not welcome:
        self.bind(size=self.draw_screen)
        return
    # Show a greeting, then hand control to start_app after 3 seconds.
    greeting = Popup(title='Coursera ML in Python',
                     content=Label(text='Hello World'),
                     size_hint=(1, 1))
    self.add_widget(greeting)
    greeting.open()
    Clock.schedule_once(self.start_app, 3)
def filter_submissions(path, rerun_flag):
    """Load the latest notebook submission per student from *path*.

    Notebook filenames follow the pattern ID_TRIAL.ipynb. Because the sorted
    listing is walked newest-first, only each student's highest trial number
    is kept. Students whose notebook fails to load are appended to
    FailedNBs.txt (truncated at the start of every run).

    Returns the list of Submission objects, newest-first.
    """
    # Start each run with an empty failure log.
    with open("FailedNBs.txt", "w") as f:
        f.write('')
    notebooks = sorted(path.glob('*.ipynb'))
    final_submissions = []
    seen_ids = set()  # FIX: set gives O(1) membership vs. the old list scan
    # FIX: iterate with reversed() instead of a manual descending index loop.
    for nb in reversed(notebooks):
        # FIX: Path.stem instead of name.split(".") (robust if the ID ever
        # contains a dot), and rsplit("_", 1) so underscores inside the ID
        # do not break the ID/TRIAL split.
        student_id, trial_no = nb.stem.rsplit("_", 1)
        if student_id in seen_ids:
            continue  # already have a newer trial for this student
        try:
            processed_submission = Submission(student_id, str(nb), trial_no, rerun_flag)
            final_submissions.append(processed_submission)
            seen_ids.add(student_id)
        except Exception as e:
            # Best-effort: log the failure and move on to the next student.
            with open("FailedNBs.txt", "a") as f:
                f.write(str(student_id) + '\n')
            print('Error 1.1: Error Reading Submission of Student ID ' + student_id + '-->', e)
    return final_submissions
# NOTE(review): this chunk appears to be the interior of the grader's
# output(part_id) function followed by the module-level submit driver --
# the enclosing ``def`` is not visible here, so the ``return`` statements
# only make sense inside that function. Confirm against the full file.

# Resolve the student's source file for this part and import the function
# of the same name from it (module name == function name by convention).
fname = srcs[part_id - 1].rsplit('.', 1)[0]
mod = __import__(fname, fromlist=[fname], level=1)
func = getattr(mod, fname)
if part_id == 1:
    # Closest-centroid assignment; +1 converts to 1-based indexing.
    idx = func(X, C)
    return sprintf('%0.5f ', idx[1] + 1)
elif part_id == 2:
    # Recompute centroids for 3 clusters. NOTE(review): ``idx`` is assumed
    # to already be in scope here -- in this fragment it is only assigned
    # in the part 1 branch; verify against the full grader.
    centroids = func(X, idx, 3)
    return sprintf('%0.5f ', centroids)
elif part_id == 3:
    # PCA: emit U and S flattened together; abs() removes the sign
    # ambiguity of eigenvectors.
    U, S, V = func(X)
    return sprintf('%0.5f ', abs(np.hstack((U.T.flatten(), S.T.flatten()))))
elif part_id == 4:
    # Project the data onto the top 5 principal components.
    X_proj = func(X, Z, 5)
    return sprintf('%0.5f ', X_proj.T.flatten())
elif part_id == 5:
    # Recover the data from its 5-dimensional projection.
    X_rec = func(X[:, :5], Z, 5)
    return sprintf('%0.5f ', X_rec.T.flatten())

# Module-level driver: build the submission and submit it, printing any
# error instead of letting the grader crash.
s = Submission(homework, part_names, srcs, output)
try:
    s.submit()
except Exception as ex:
    template = "An exception of type {0} occured. Messsage:\n{1!r}"
    message = template.format(type(ex).__name__, ex.args)
    print(message)
fname = srcs[part_id - 1].rsplit(".", 1)[0] mod = __import__(fname, fromlist=[fname], level=1) func = getattr(mod, fname) if part_id == 1: sim = func(x1, x2, 2) return sprintf("%0.5f ", sim) elif part_id == 2: data = scipy.io.loadmat("ex6data3.mat") X = data["X"] y = data["y"].flatten() Xval = data["Xval"] yval = data["yval"].flatten() C, sigma = func(X, y, Xval, yval) return sprintf("%0.5f ", np.hstack((C, sigma))) elif part_id == 3: word_indices = np.array(func(ec)) return sprintf("%d ", (word_indices + 1).tolist()) elif part_id == 4: x = func(wi) return sprintf("%d", x) s = Submission(homework, part_names, srcs, output) try: s.submit() except Exception as ex: template = "An exception of type {0} occured. Messsage:\n{1!r}" message = template.format(type(ex).__name__, ex.args) print message
# Script setup: wire together the preprocessing, filtering, modelling and
# logging objects, then train a first decision-tree classifier.
# NOTE(review): these module-level names are likely referenced later in the
# file (not visible in this chunk), so none of them should be renamed.
from Submission import Submission
from sklearn.metrics import accuracy_score, auc, roc_auc_score, classification_report
from sklearn.model_selection import train_test_split, cross_val_score
from config import *

print('test began')

# Collaborator objects for the pipeline.
dataProcessor = Preprocessor()
transactionData = dataProcessor.get_data()  # original given data
featureFilter = FeatureFilter()
algorithm = Models()
result = ResultLog()
miniProcessor = MiniProcessor(transactionData)
submission = Submission()

# Training split (attributes and labels come pre-separated from the
# preprocessor).
X_train = dataProcessor.get_train_attributes()
y_train = dataProcessor.get_train_labels()
# Test split.
X_test = dataProcessor.get_test_attributes()
y_test = dataProcessor.get_test_labels()

''' First Learning to classify the rows into early, ontime, late '''
# decision_tree_dict comes from ``config`` (star import above).
trainedModel = algorithm.decision_tree(decision_tree_dict, X_train, y_train, X_test, y_test)  # save trained model
def main(notify):
    """Build a subreddit cross-post graph from the submissions DB and export it.

    Reads every row from data/submissions.sqlite, builds an undirected
    networkx graph (nodes = subreddits, edge weights = cross-post counts),
    prunes heavy x-posters and weakly linked nodes, and writes the result to
    data/output.gexf.

    NOTE(review): the ``notify`` parameter is unused in this body -- confirm
    whether a caller still depends on the signature.
    """
    db = sqlite3.connect('data/submissions.sqlite')
    g = nx.Graph()
    for row in db.execute('SELECT * FROM submissions'):
        submission = Submission(row)
        from_sub = submission.subreddit.lower()
        initialize_node(g, from_sub)
        g.node[from_sub]['post_count'] += 1
        # weight the edges: each x-post increments the edge between the
        # source and target subreddit
        if submission.is_x_post():
            to_sub = submission.get_sub_from_url().lower()
            initialize_node(g, to_sub)
            g.node[from_sub]['x_post_count'] += 1
            g.node[to_sub]['x_post_to_count'] += 1
            if g.has_edge(from_sub, to_sub):
                g[from_sub][to_sub]['weight'] += 1
            else:
                g.add_edge(from_sub, to_sub, weight=1)
        # keep track of self posts
        if submission.is_self_post():
            g.node[from_sub]['self_count'] += 1
    """ Now I want to remove reddits who primarily link to other content on reddit (z.B. x-post reddits like bestof and worstof) And also remove nodes that are relatively weakly linked, I.E. degree of 1 or 0 """
    # remove nodes (subs) that x-post too heavily (> 20% of their posts --
    # the code checks .2, not the 40% an earlier comment claimed)
    # or those whose weighted degree is below a threshold
    to_remove = []
    for n in g:
        data = g.node[n]
        removed = False
        # avoid division by zero in the next if-block
        if data['post_count'] == 0:
            data['post_count'] = 1
        # the too-heavy x-posters
        # NOTE(review): this file is Python 2 (print statements), so this is
        # integer division -- the ratio is 0 unless x_post_count >=
        # post_count, making the 0.2 threshold mostly ineffective. Confirm
        # and use float() on one operand if the percentage test is intended.
        if data['x_post_count'] / data['post_count'] > .2:
            print "%s removed" % n
            to_remove.append(n)
            removed = True
        # the low weights: sum this node's edge weights
        weight = 0
        for e in g.edges(n, data=True):
            weight += e[2]['weight']
        if weight < 15 and not removed:
            print "%s for low weight" % n
            to_remove.append(n)
    # now remove them (deferred so the node iteration above isn't mutated)
    # TODO do better than this
    for n in to_remove:
        g.remove_node(n)
    db.close()
    nx.write_gexf(g, 'data/output.gexf')