def main():
    # File names
    data_dir = "./data/"
    json_name = data_dir + "data.json"
    csv_name = data_dir + "score_list.csv"
    tweet7_json = data_dir + "tweet7.json"
    day7_json = data_dir + "day7.json"
    week7_json = data_dir + "week7.json"
    pn_dict = data_dir + "pn_ja.dic"
    setting_name = data_dir + "setting.txt"

    # Parameters
    prev_tweet_num = 10
    get_tweet_num_by_time = 3
    sum_score = 0

    # Remove data files left over from a previous run
    remove_file_names = [
        json_name, csv_name, setting_name, tweet7_json, day7_json, week7_json
    ]
    removeFiles(remove_file_names)

    # Wait until the front end has finished its setup
    account_id, goal = waitSetting(setting_name)  # goal is read but not yet used below

    # Create and initialize the worker objects
    # Account object
    user = account.Account(account_id)

    # DataManager object
    dm = dataManager.DataManager()
    dm.setJson(json_name)
    dm.loadJson()
    dm.setCsv(csv_name)
    dm.loadCsv()

    # DataSaver object
    ds = dataSaver.DataSaver(tweet7_json, day7_json, week7_json)

    # Analyzer object
    anlzr = analyzer.Analyzer()
    anlzr.loadPnDict(pn_dict)

    # Fetch recent tweets once before the monitoring loop starts
    for tweet_id, tweet_info in user.getTimeline(prev_tweet_num, 1).items():
        date = tweet_info["date"]
        tweet_text = tweet_info["tweet"]
        mentions = tweet_info["mentions"]
        #score = anlzr.pnDictScore(tweet_text)
        score = anlzr.gcnlScore(tweet_text)
        sum_score += score
        high_score_words = []
        json_dict = {
            tweet_id: {
                "date": date,
                "tweet": tweet_text,
                "mentions": mentions,
                "score": score,
                "sum_score": sum_score,
                "high_score_words": high_score_words
            }
        }
        dm.updateDatabase(json_dict)

    date_score_list = dm.getCsv()
    ds.updateSubTotalJsons(date_score_list)

    # Poll the timeline and score any tweets not seen before
    while True:
        print("monitoring timeline...")
        time.sleep(10)
        for tweet_id, tweet_info in user.getTimeline(get_tweet_num_by_time, 1).items():
            date = tweet_info["date"]
            tweet_text = tweet_info["tweet"]
            mentions = tweet_info["mentions"]
            if dm.hasData(tweet_id):
                print("Tweet already stored; skipping.")
                continue
            #score = anlzr.pnDictScore(tweet_text)
            score = anlzr.gcnlScore(tweet_text)
            sum_score += score
            high_score_words = []
            json_dict = {
                tweet_id: {
                    "date": date,
                    "tweet": tweet_text,
                    "mentions": mentions,
                    "score": score,
                    "sum_score": sum_score,
                    "high_score_words": high_score_words
                }
            }
            dm.updateDatabase(json_dict)
            date_score_list = dm.getCsv()
            ds.updateSubTotalJsons(date_score_list)
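# main() assumes two helpers, removeFiles() and waitSetting(), that are not
# defined in this file. A minimal sketch of what they might look like follows;
# the polling interval and the one-line "account_id,goal" file layout are
# assumptions, not taken from the original source.

import os
import time


def removeFiles(file_names):
    """Delete each file in file_names, ignoring ones that do not exist."""
    for name in file_names:
        try:
            os.remove(name)
        except FileNotFoundError:
            pass


def waitSetting(setting_name, poll_interval=1.0):
    """Block until the front end writes the settings file, then parse it.

    Assumes a one-line "account_id,goal" format; adjust to the real layout.
    """
    while not os.path.exists(setting_name):
        time.sleep(poll_interval)
    with open(setting_name, encoding="utf-8") as f:
        account_id, goal = f.read().strip().split(",")
    return account_id, int(goal)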
def manageData():
    # mpStartDates, daysInMP, coursesAllMPs, studentName and studentID are
    # module-level; mpStartDates entries are [day, month, year].
    # Determine the current marking period (MP) from today's date.
    currentDate = datetime.datetime.today()
    currentDateObj = date.Date(currentDate.day, currentDate.month,
                               currentDate.year)
    currentMPindex = 3
    for i in range(0, 4):
        mpStartDate = date.Date(mpStartDates[i][0], mpStartDates[i][1],
                                mpStartDates[i][2])
        if currentDateObj.compareToDateObj(mpStartDate) < 0:
            currentMPindex = i - 1
    if currentMPindex < 0:  # Current date is outside of the school year
        currentMPindex = 3  # 4th marking period

    # Count the school days covered so far; each MP ends the day before the
    # next one starts.
    numDaysInSchoolYear = 0
    for mp in range(0, currentMPindex + 1):
        startDate = date.datetime.date(mpStartDates[mp][2], mpStartDates[mp][1],
                                       mpStartDates[mp][0])
        endDate = date.datetime.date(
            mpStartDates[mp + 1][2], mpStartDates[mp + 1][1],
            mpStartDates[mp + 1][0]) - datetime.timedelta(days=1)
        daysInMP[mp] = (endDate - startDate).days
        numDaysInSchoolYear += daysInMP[mp]

    firstDay = datetime.date(mpStartDates[0][2], mpStartDates[0][1],
                             mpStartDates[0][0])
    print("FIRST DAY: ")
    print(firstDay)
    print("NUM DAYS IN SCHOOL YEAR: " + str(numDaysInSchoolYear))

    # One bucket per school day; a comprehension is used so the rows are
    # independent lists ([[...]] * n would alias one list n times).
    data = [[] for _ in range(numDaysInSchoolYear + 1)]
    for m in range(0, currentMPindex + 1):
        print("M: " + str(m))
        dm = dataManager.DataManager(studentName, studentID, coursesAllMPs[m], m)
        for c in range(len(coursesAllMPs[m])):
            dailyGrades = dm.getDailyCourseGradesForMP(c, m)
            for d in range(len(dailyGrades)):
                daysSinceStartOfYear = (dailyGrades[d][0] - firstDay).days - 1
                try:
                    data[daysSinceStartOfYear - 1].append(dailyGrades[d])
                except IndexError:
                    print("IndexError")

    # allData[course][mp] = [[date, grade, ptsRec, ptsW], ...]
    # e.g. allData[0][0][0][1] = [AP ENG/LANG & COMP][mp1][2019-09-01][grade]
    allData = []
    for c in range(0, len(coursesAllMPs[0])):
        courseDataAllMPs = []
        for mp in range(0, currentMPindex + 1):
            dm = dataManager.DataManager(studentName, studentID,
                                         coursesAllMPs[mp], mp)
            dailyGrades = dm.getDailyCourseGradesForMP(c, mp)
            courseDataAllMPs.append(dailyGrades)
        allData.append(courseDataAllMPs)

    # allData2 = [[date, [[grade, ptsRec, ptsW] for each course]], ... for all dates]
    allData2 = []
    for m in range(0, len(allData[0])):
        for d in range(len(allData[0][m])):  # for every day in the MP
            if d <= numDaysInSchoolYear:
                arrC = []
                for c in range(len(coursesAllMPs[m])):  # for every course
                    arrC.append([
                        allData[c][m][d][1], allData[c][m][d][2],
                        allData[c][m][d][3]
                    ])
                allData2.append([allData[0][m][d][0], arrC])

    # OUTPUT DATA TO FILE
    try:
        outfile = open("dailyGradesFile.txt", "x")
    except FileExistsError:
        outfile = open("dailyGradesFile.txt", "w")
    try:
        json_outfile = open("gradesDictionary.json", 'x')
    except FileExistsError:
        json_outfile = open("gradesDictionary.json", 'w')
    json_outfile.write("[")
    json_outfile.close()  # close before re-opening in append mode below

    mpIndexPlus1 = 1
    for d in range(len(allData2)):
        dDate = date.Date(allData2[d][0].day, allData2[d][0].month,
                          allData2[d][0].year)
        endDate = date.Date(mpStartDates[4][0], mpStartDates[4][1],
                            mpStartDates[4][2])
        if dDate.compareToDateObj(endDate) >= 0:
            break  # Stop at the last day of school
        outfile.write(str(allData2[d][0]) + "\n")  # write the date
        for c in range(len(allData2[d][1])):
            try:
                if (date.Date(allData2[d][0].day, allData2[d][0].month,
                              allData2[d][0].year).compareToDMY(
                                  mpStartDates[mpIndexPlus1][0],
                                  mpStartDates[mpIndexPlus1][1],
                                  mpStartDates[mpIndexPlus1][2]) >= 0):
                    mpIndexPlus1 += 1
                lstLbls = ["Date", "Course", "Grd", "PR", "PW"]
                if len(coursesAllMPs[mpIndexPlus1 - 1][c].code) > 0:
                    lst = [
                        str(allData2[d][0]),
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName,
                        allData2[d][1][c][0], allData2[d][1][c][1],
                        allData2[d][1][c][2]
                    ]
                    outfile.write("\t{0:30} {1:15} {2:15} {3:15}\n".format(
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName + ":: ",
                        "Grd:" + str(allData2[d][1][c][0]),
                        "PR:" + str(allData2[d][1][c][1]),
                        "PW:" + str(allData2[d][1][c][2])))
                else:  # course has no code: no data for this MP
                    lst = [
                        str(allData2[d][0]),
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName,
                        "----", "----", "----"
                    ]
                    outfile.write("\t{0:30} {1:15} {2:15} {3:15}\n".format(
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName + ":: ",
                        "Grd:----", "PR:----", "PW:----"))
                with open("gradesDictionary.json", 'a') as json_outfile:
                    json.dump(Convert(lstLbls, lst), json_outfile, indent=2)
                    # The comma must be written while the file is still open;
                    # note the early break above can still leave a trailing comma.
                    if d != len(allData2) - 1 or c != len(allData2[d][1]) - 1:
                        json_outfile.write(",\n")
            except IndexError:
                break

    with open("gradesDictionary.json", 'a') as json_outfile:
        json_outfile.write("]")
    outfile.close()
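# manageData() relies on a Convert() helper that is not defined in this file.
# Judging by the call Convert(lstLbls, lst), it pairs the label list with the
# value list; a minimal sketch under that assumption:


def Convert(labels, values):
    """Zip parallel label/value lists into a dict, e.g.
    Convert(["Date", "Grd"], ["2019-09-01", 95]) -> {"Date": "2019-09-01", "Grd": 95}
    """
    return dict(zip(labels, values))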
def train_SPHash(modelSavedPath='../largeData/model', inputType='Cropped',
                 bitSize=16, num_epochs=30, weightSM=1.0, weightApp=1.0,
                 weightFire=1.0):
    tf.reset_default_graph()
    prefix = ('JH_SPDH' + inputType + '_' + str(bitSize) + '_wSM_' +
              str(weightSM) + '_wApp_' + str(weightApp) + '_wFire_' +
              str(weightFire))

    # Training hyperparameters
    batchSize = 32
    dropout_rate = 0.5
    display_step = 20
    num_classes = 6  # For ASAN Data

    filewriter_path = modelSavedPath + '/tensorboardPath'
    checkpoint_path = modelSavedPath + '/modelTrainedPath'
    if not os.path.exists(filewriter_path):
        os.mkdir(filewriter_path)
    if not os.path.exists(checkpoint_path):
        os.mkdir(checkpoint_path)

    if inputType == 'Cropped':
        trainImg_path = '../largeData/data/exp_jh/bbox_search_img.npy'
        trainLabel_path = '../largeData/data/exp_jh/bbox_search_label.npy'
        testImg_path = '../largeData/data/exp_jh/bbox_query_img.npy'
        testLabel_path = '../largeData/data/exp_jh/bbox_query_label.npy'
    elif inputType == 'Activated':
        trainImg_path = '../largeData/data/exp_jh/activated_search_img.npy'
        trainLabel_path = '../largeData/data/exp_jh/activated_search_label.npy'
        testImg_path = '../largeData/data/exp_jh/activated_query_img.npy'
        testLabel_path = '../largeData/data/exp_jh/activated_query_label.npy'
    elif inputType == 'Origin':
        trainImg_path = '../largeData/data/exp_jh/origin_search_img.npy'
        trainLabel_path = '../largeData/data/exp_jh/origin_search_label.npy'
        testImg_path = '../largeData/data/exp_jh/origin_query_img.npy'
        testLabel_path = '../largeData/data/exp_jh/origin_query_label.npy'
    else:
        print('No available option found for ' + inputType)
        return

    print('load train data...')
    trainData = dataManager.DataManager(imgNpyPath=trainImg_path,
                                        labelNpyPath=trainLabel_path,
                                        batchSize=batchSize,
                                        classNumber=num_classes)
    print('load test data...')
    testData = dataManager.DataManager(imgNpyPath=testImg_path,
                                       labelNpyPath=testLabel_path,
                                       batchSize=batchSize,
                                       classNumber=num_classes)
    trainDataSize = np.shape(trainData.label_data)[0]
    testDataSize = np.shape(testData.label_data)[0]

    x = tf.placeholder(tf.float32, [batchSize, 227, 227, 3], name='input_network')
    y = tf.placeholder(tf.float32, [batchSize, num_classes], name='label_network')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    model = SPDH_tf(bitSize=bitSize, x_shape=x, y_shape=y, keep_prob=keep_prob,
                    num_classes=num_classes, skip_layer=['fc8', 'fc7', 'fc6'],
                    weights_path='../largeData/modelAlexnet/bvlc_alexnet.npy',
                    batchSize=batchSize, weightSC=weightSM, weightApp=weightApp,
                    weightFire=weightFire)
    model.build_basic_graph()
    model.design_loss()

    writer = tf.summary.FileWriter(filewriter_path)
    # savor_max_to_keep and model_save_step are module-level settings
    saver = tf.train.Saver(max_to_keep=savor_max_to_keep)

    train_batches_per_epoch = int(np.floor(trainDataSize / batchSize))
    val_batches_per_epoch = int(np.floor(testDataSize / batchSize))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        writer.add_graph(sess.graph)
        model.load_initial_weights(sess)

        print("{} Start training...".format(datetime.datetime.now()))
        print("{} Open Tensorboard at --logdir {}".format(
            datetime.datetime.now(), filewriter_path))

        # Loop over the number of epochs
        for epoch in range(num_epochs):
            print('trainData initializer on GPU')
            q = trainData.getTrueData()
            sess.run(trainData.getInitializer(),
                     feed_dict={trainData.x: q[0], trainData.y: q[1]})
            print("{} Epoch number: {}".format(datetime.datetime.now(), epoch + 1))

            for step in range(train_batches_per_epoch):
                input_value, label_value = sess.run(trainData.getNextBatchPlaceholder())
                # forward pass of the latent (hash) layer, kept for inspection
                p = sess.run(model.latentLayer,
                             feed_dict={model.x: input_value, model.keep_prob: 1.0})
                # run the training op and collect the individual loss terms
                lossTotal, lossSC, lossApp, lossFire, _ = sess.run(
                    [model.loss_total, model.loss_sc, model.loss_app,
                     model.loss_fire, model.train_op],
                    feed_dict={model.x: input_value, model.y: label_value,
                               model.keep_prob: dropout_rate})

                # Generate a summary for the current batch and write it to file
                if step % display_step == 0:
                    s = sess.run(model.merged_summary,
                                 feed_dict={model.x: input_value,
                                            model.y: label_value,
                                            model.keep_prob: 1.})
                    print('At ' + str(step) + ', train loss: ' + str(lossTotal) +
                          ', train SC: ' + str(lossSC) + ', train App: ' +
                          str(lossApp) + ', train Fire: ' + str(lossFire))
                    writer.add_summary(s, epoch * train_batches_per_epoch + step)

            # Validate the model on the entire validation set
            print("{} Start validation".format(datetime.datetime.now()))
            test_acc = 0.
            test_count = 0
            print('testData initializer on GPU')
            p = testData.getTrueData()
            sess.run(testData.getInitializer(),
                     feed_dict={testData.x: p[0], testData.y: p[1]})
            for _ in range(val_batches_per_epoch):
                img_batch, label_batch = sess.run(testData.getNextBatchPlaceholder())
                acc = sess.run(model.accuracy,
                               feed_dict={x: img_batch, y: label_batch,
                                          keep_prob: 1.})
                test_acc += acc
                test_count += 1
            test_acc /= test_count
            print("{} Validation Accuracy = {:.4f}".format(
                datetime.datetime.now(), test_acc))

            # save a checkpoint of the model every model_save_step epochs
            if epoch % model_save_step == 0:
                print("{} Saving checkpoint of model...".format(
                    datetime.datetime.now()))
                checkpoint_name = os.path.join(
                    checkpoint_path,
                    prefix + 'model_epoch' + str(epoch + 1) + '.ckpt')
                save_path = saver.save(sess, checkpoint_name)
                print("{} Model checkpoint saved at {}".format(
                    datetime.datetime.now(), checkpoint_name))
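# The checkpoints written above can be reloaded with the same tf.train.Saver
# API. A minimal sketch, not part of the original source: it assumes the graph
# has been rebuilt exactly as in train_SPHash() before restoring, and that
# `model` and `ckpt_file` are supplied by the caller.


def restore_and_hash(model, ckpt_file, img_batch):
    """Restore a saved checkpoint and emit latent-layer hash codes."""
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, ckpt_file)  # loads the variables written by saver.save()
        return sess.run(model.latentLayer,
                        feed_dict={model.x: img_batch, model.keep_prob: 1.0})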
""" pdata = pd.DataFrame(date_score_list) pdata.columns = ["date", "score"] pdata["date"] = pd.to_datetime(pdata["date"], format="%Y%m%d_%H%M%S") sorted_pdata = pdata.sort_values("date") return sorted_pdata def sendSumScore(self, save_file_name, sum_score): try: f = codecs.open(save_file_name, "w", encoding="utf-8") except OSError as e: print("sendSumScore Error") else: f.write(str(sum_score + 10)) if __name__ == "__main__": csv_name = "./data/score_list.csv" tweets_name = "./data/seven_tweets_scores.json" days_name = "./data/seven_days_scores.json" weeks_name = "./data/seven_weeks_scores.json" import dataManager dm = dataManager.DataManager() dm.setCsv(csv_name) dm.loadCsv() date_score_list = dm.getCsv() ds = DataSaver(tweets_name, days_name, weeks_name) ds.updateSubTotalJsons(date_score_list, data_num=7)
def __init__(self):
    # some fonts
    self.headerfont = 'TkHeadingFont'
    self.cellfont = 'TkTextFont'

    # initialize the main window and configure the overall layout
    self.window = Tk()
    self.window.title("SkinnerBox Control")
    self.window.columnconfigure(0, weight=1)
    self.window.rowconfigure(0, weight=1)

    self.n = ttk.Notebook(self.window)
    n = self.n
    n.columnconfigure(0, weight=1)
    n.rowconfigure(0, weight=1)
    w = Frame(n)            # first tab: all fish overview
    self.f2 = Frame(n)      # second tab: single fish view
    self.ctlTab = Frame(n)  # third tab: controls
    n.add(w, text='All Fish')
    n.add(self.f2, text='Single Fish')
    n.add(self.ctlTab, text='Controls')
    n.grid(row=0, column=0)

    w.columnconfigure(0, weight=1)
    w.columnconfigure(1, weight=1)
    w.rowconfigure(0, weight=0)
    w.rowconfigure(1, weight=1)
    w.rowconfigure(2, weight=1)
    w.rowconfigure(3, weight=1)
    Label(w, text='SkinnerBox control',
          font='TkCaptionFont').grid(row=0, column=0, columnspan=2, sticky=W)

    # subframes:
    # update indicator: a labelled canvas that changes color while the
    # data grabber is fetching new data
    self.upf = LabelFrame(w, text='controls')
    # currently playing fish: a table of all fish currently running trials
    self.cpf = LabelFrame(w, text='Currently playing fish')
    # all trials frame: a dictionary display for summary data across all trials
    self.atf = LabelFrame(w, text='All trials')
    # current fish display: shows data about a selected fish
    self.cfd = LabelFrame(w, text='Current fish')
    self.grf = LabelFrame(self.f2, text='Individual Fish')
    self.rstFrame = LabelFrame(self.ctlTab, text='Reset controllers')

    # configure the layout of the frames and subframes
    self.upf.grid(row=1, column=0, columnspan=10, sticky=W + N)
    self.upf.grid_columnconfigure(0, weight=1)
    self.upf.grid_columnconfigure(1, weight=1)
    self.upf.grid_rowconfigure(0, weight=1)
    self.upf.grid_rowconfigure(1, weight=1)
    self.cpf.grid(row=2, column=1, sticky=W + N)
    for i in range(0, 21):
        self.cpf.grid_rowconfigure(i, weight=1)
    for i in range(0, 4):
        self.cpf.grid_columnconfigure(i, weight=1)
    self.atf.grid(row=2, column=0, sticky=W + N)
    for i in range(0, 10):
        self.atf.grid_rowconfigure(i, weight=1)
    for i in range(0, 2):
        self.atf.grid_columnconfigure(i, weight=1)
    self.cfd.grid(row=2, column=2, sticky=W + N)
    for i in range(0, 10):
        self.cfd.grid_rowconfigure(i, weight=1)
    for i in range(0, 2):
        self.cfd.grid_columnconfigure(i, weight=1)
    self.grf.grid(row=0, column=0)
    self.rstFrame.grid(row=0, column=0)

    # create the update indicator label and indicator light
    Label(self.upf, text='Loading').grid(row=0, column=0, sticky=W)
    self.upind = Canvas(self.upf, bg='grey', width=20, height=20)
    self.upind.grid(row=0, column=1, sticky=E)

    # create buttons on the update indicator
    Button(self.upf, text='Show Graph',
           command=self.show_graph_callback).grid(row=1, column=0, sticky=W)

    # create buttons on the control tab
    Button(self.rstFrame, text='Reset Laptop',
           command=self.reset_laptop_button_callback).grid(row=0, column=0, sticky=W)
    Button(self.rstFrame, text='Reset PI_001',
           command=self.reset_pi_001_button_callback).grid(row=1, column=0, sticky=W)
    Button(self.rstFrame, text='Reset PI_002',
           command=self.reset_pi_002_button_callback).grid(row=2, column=0, sticky=W)
    Button(self.rstFrame, text='Reset PI_003',
           command=self.reset_pi_003_button_callback).grid(row=3, column=0, sticky=W)
    Button(self.rstFrame, text='Reset All',
           command=self.reset_all_button_callback).grid(row=4, column=0, sticky=W)

    # an area to draw the graphics to
    self.grf_canvas = Frame(self.grf, border=10, relief=SUNKEN)
    self.grf_canvas.grid(row=1, column=0, columnspan=3)

    # title for the graphics frame (the name of the fish)
    self.grf_title = Label(master=self.grf, text="")
    self.grf_title.grid(row=2, column=1)

    # create buttons on the graphics frame
    Button(master=self.grf, text='<<',
           command=self.previous_fish).grid(row=2, column=0)
    Button(master=self.grf, text='>>',
           command=self.next_fish).grid(row=2, column=2)
    Button(master=self.grf, text='show schedules',
           command=self.show_schedules).grid(row=3, column=2)
    Button(master=self.grf, text='show dates',
           command=self.show_dates).grid(row=4, column=2)
    Button(master=self.grf, text='clear selection',
           command=self.clear_selection).grid(row=5, column=2)
    self.schedList = Listbox(self.grf, selectmode=MULTIPLE, width=40)
    self.schedList.grid(row=3, column=0, rowspan=3, sticky=W)
    self.dateList = Listbox(self.grf, selectmode=MULTIPLE, width=30)
    self.dateList.grid(row=3, column=1, rowspan=3, sticky=W)

    # create the data grabber and the plotter
    self.dm = dataManager.DataManager()
    self.skp = skinnerplot.skinnerplot()
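# The button callbacks referenced above (show_graph_callback, next_fish, ...)
# live elsewhere in this class. As an illustration of how the indicator light
# is meant to be driven, here is a minimal hypothetical method on the same
# class; the green/grey color convention is an assumption, not taken from the
# original source.

def _set_update_indicator(self, busy):
    """Color the indicator canvas green while data is being fetched."""
    self.upind.configure(bg='green' if busy else 'grey')
    self.upind.update_idletasks()  # repaint immediately, even mid-callback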
def manageData():
    # Determine the current marking period (MP) from today's date
    currentDate = datetime.datetime.today()
    currentDateObj = date.Date(currentDate.day, currentDate.month,
                               currentDate.year)
    currentMPindex = 3
    for i in range(0, 4):
        mpStartDate = date.Date(mpStartDates[i][0], mpStartDates[i][1],
                                mpStartDates[i][2])
        if currentDateObj.compareToDateObj(mpStartDate) < 0:
            currentMPindex = i - 1
    if currentMPindex < 0:  # Current date is outside of the school year
        currentMPindex = 3  # 4th marking period

    # Count the school days in the year; each MP ends the day before the
    # next one starts (mpStartDates entries are [day, month, year])
    numDaysInSchoolYear = 0
    for mp in range(len(coursesAllMPs)):
        startDate = date.datetime.date(mpStartDates[mp][2], mpStartDates[mp][1],
                                       mpStartDates[mp][0])
        endDate = date.datetime.date(
            mpStartDates[mp + 1][2], mpStartDates[mp + 1][1],
            mpStartDates[mp + 1][0]) - datetime.timedelta(days=1)
        daysInMP[mp] = (endDate - startDate).days
        numDaysInSchoolYear += daysInMP[mp]

    firstDay = datetime.date(mpStartDates[0][2], mpStartDates[0][1],
                             mpStartDates[0][0])

    # One bucket per school day; a comprehension avoids aliasing one list
    data = [[] for _ in range(numDaysInSchoolYear + 1)]
    for m in range(len(coursesAllMPs)):
        dm = dataManager.DataManager(studentName, studentID, coursesAllMPs[m],
                                     m + 1)
        for c in range(len(coursesAllMPs[m])):
            dailyGrades = dm.getDailyCourseGradesForMP(c, m)
            for d in range(len(dailyGrades)):
                daysSinceStartOfYear = (dailyGrades[d][0] - firstDay).days - 1
                try:
                    data[daysSinceStartOfYear - 1].append(dailyGrades[d])
                except IndexError:
                    print("IndexError")

    # allData[course][mp] = [[date, grade, ptsRec, ptsW], ...]
    # e.g. allData[0][0][0][1] = [AP ENG/LANG & COMP][mp1][2019-09-01][grade]
    allData = []
    for c in range(0, len(coursesAllMPs[0])):
        courseDataAllMPs = []
        for mp in range(0, currentMPindex + 1):
            dm = dataManager.DataManager(studentName, studentID,
                                         coursesAllMPs[mp], mp)
            dailyGrades = dm.getDailyCourseGradesForMP(c, mp)
            courseDataAllMPs.append(dailyGrades)
        allData.append(courseDataAllMPs)

    # allData2 = [[date, [[grade, ptsRec, ptsW] for each course]], ... for all dates]
    allData2 = []
    for m in range(0, len(allData[0])):
        for d in range(len(allData[0][m])):  # for every day in the MP
            if d <= numDaysInSchoolYear:
                arrC = []
                for c in range(len(coursesAllMPs[m])):  # for every course
                    arrC.append([
                        allData[c][m][d][1], allData[c][m][d][2],
                        allData[c][m][d][3]
                    ])
                allData2.append([allData[0][m][d][0], arrC])

    # OUTPUT DATA TO FILE
    try:
        outfile = open("dailyGradesFile.txt", "x")
    except FileExistsError:
        outfile = open("dailyGradesFile.txt", "w")
    # open in write mode so a re-run does not append to a previous run's output
    with open("gradesDictionary.txt", 'w') as json_outfile:
        json_outfile.write("[")

    mpIndexPlus1 = 1
    for d in range(len(allData2)):
        dDate = date.Date(allData2[d][0].day, allData2[d][0].month,
                          allData2[d][0].year)
        endDate = date.Date(mpStartDates[4][0], mpStartDates[4][1],
                            mpStartDates[4][2])
        if dDate.compareToDateObj(endDate) >= 0:
            break  # Stop at the last day of school
        print(str(allData2[d][0]))
        outfile.write(str(allData2[d][0]) + "\n")  # write the date
        for c in range(len(allData2[d][1])):
            try:
                if (date.Date(allData2[d][0].day, allData2[d][0].month,
                              allData2[d][0].year).compareToDMY(
                                  mpStartDates[mpIndexPlus1][0],
                                  mpStartDates[mpIndexPlus1][1],
                                  mpStartDates[mpIndexPlus1][2]) >= 0):
                    mpIndexPlus1 += 1
                lstLbls = ["Date", "Course", "Grd", "PR", "PW"]
                if len(coursesAllMPs[mpIndexPlus1 - 1][c].code) > 0:
                    lst = [
                        str(allData2[d][0]),
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName,
                        allData2[d][1][c][0], allData2[d][1][c][1],
                        allData2[d][1][c][2]
                    ]
                    row = "\t{0:30} {1:15} {2:15} {3:15}\n".format(
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName + ":: ",
                        "Grd:" + str(allData2[d][1][c][0]),
                        "PR:" + str(allData2[d][1][c][1]),
                        "PW:" + str(allData2[d][1][c][2]))
                else:  # course has no code: no data for this MP
                    lst = [
                        str(allData2[d][0]),
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName,
                        "----", "----", "----"
                    ]
                    row = "\t{0:30} {1:15} {2:15} {3:15}\n".format(
                        coursesAllMPs[mpIndexPlus1 - 1][c].courseName + ":: ",
                        "Grd:----", "PR:----", "PW:----")
                print(row, end="")
                outfile.write(row)
                with open("gradesDictionary.txt", 'a') as json_outfile:
                    json.dump(Convert(lstLbls, lst), json_outfile, indent=2)
                    # only write a comma between records, never after the last
                    # one (the original `d < len(allData2)` test was always
                    # true and produced an invalid trailing comma)
                    if d != len(allData2) - 1 or c != len(allData2[d][1]) - 1:
                        json_outfile.write(",\n")
            except IndexError:
                break

    with open("gradesDictionary.txt", 'a') as json_outfile:
        json_outfile.write("]")
    outfile.close()
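# Both versions of manageData() hand-write the JSON array and then have to
# fight trailing commas (the early `break` can still leave one). A simpler
# pattern, sketched here under the assumption that the records fit in memory,
# is to collect the dicts in a list and serialize the whole array in one call
# (json is already imported by this module):


def writeGradeRecords(records, path="gradesDictionary.json"):
    """Serialize a list of record dicts as one JSON array, no comma bookkeeping."""
    with open(path, "w") as f:
        json.dump(records, f, indent=2)

# Inside the loop one would then do records.append(Convert(lstLbls, lst))
# instead of dumping each record, and call writeGradeRecords(records) once at
# the end.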