def main(argv):
    """Split the input video into frames, process them, and rejoin the result.

    argv[1] is the input video path. Returns 0 on success, 1 on bad usage.
    """
    if len(argv) != 2:
        # print() call form works on both Python 2 and 3 for a single argument
        print('format: main.py <input>')
        return 1
    filesystem = FileSystem()
    filesystem.initDirs()
    Logger.write('start explode video')
    videoProcessor = DefaultVideoProcessor()
    videoProcessor.splitVideo(argv[1], config.DEFAULT_IMGS_DIR)
    filterManager = FiltersManager()
    # filterManager.addFilter(CropFilter(230, 263, 575, 530))
    # filterManager.addFilter(PseudoColorFilter())
    videoProcessor.work(DiffPrevImageProcessor(), filterManager.release())
    # videoProcessor.work(DiffMovingAverageDarkImageProcessor().init(30, 30, 'dark.png'), filterManager.release())
    Logger.write('start implode video')
    # Output name encodes the input name plus the applied filter names.
    targetFilename = '%s_%s.avi' % (argv[1], filterManager.getNames())
    videoProcessor.joinVideo(config.DEFAULT_IMGS_OUT_DIR, targetFilename)
    Logger.write('end of work. created video in %s' % targetFilename)
    filesystem.deleteDirs()
    return 0
def rename_file(self):
    """Rename the selected file to the name typed into the edit box.

    The directory part is taken from the label text (Windows-style '\\'
    separators); only the basename is replaced.
    """
    sep_index = self.file_label.text().rfind("\\")
    new_name = os.path.join(self.file_label.text()[0:sep_index],
                            self.file_line_edit.text())
    FileSystem.rename_file(self, self.file_label.text(), new_name)
    self.set_file_name(new_name)
    # Swap the edit box back out for the static label.
    self.file_line_edit.hide()
    self.file_label.show()
def run(projectName):
    """Aggregate forum-post counts into 5-year age buckets (overall, by
    gender, and by employment status) from results.csv, then write one
    summary CSV per grouping via summarizeBuckets.
    """
    path = os.path.join(FileSystem.getResultsDir(), projectName, 'results.csv')
    with open(path) as fid:
        rows = fid.readlines()

    ageMax = 75
    ageMin = 15
    sizeBucket = 5
    # Floor division: the original used '/', which yields a float (and a
    # TypeError on range()) under Python 3.
    numBuckets = (ageMax - ageMin) // sizeBucket

    buckets = [[] for _ in range(numBuckets)]
    bucketsByGender = {'male': [[] for _ in range(numBuckets)],
                       'female': [[] for _ in range(numBuckets)]}
    bucketsByEmployment = {employmentOptions[key]: [[] for _ in range(numBuckets)]
                           for key in employmentOptions}

    # Respondents at or above ageMax go into a single overflow bin ("oldies").
    oldies = []
    oldiesByGender = {'male': [], 'female': []}
    oldiesByEmployment = {employmentOptions[key]: [] for key in employmentOptions}

    for r in rows:
        row = r.strip().split(', ')
        age = int(row[1])
        gender = row[2]
        employment = row[3]
        studentStatus = row[4]  # unused; kept to document the row format
        grade = int(row[5])     # unused; int() retained so malformed rows still fail fast
        numPosts = int(row[6])
        if ageMin <= age < ageMax:
            # Renamed from 'bin' to avoid shadowing the builtin.
            idx = (age - ageMin) // sizeBucket
            buckets[idx].append(numPosts)
            if gender == 'male' or gender == 'female':
                bucketsByGender[gender][idx].append(numPosts)
            bucketsByEmployment[employment][idx].append(numPosts)
        if age >= ageMax:
            oldies.append(numPosts)
            if gender == 'male' or gender == 'female':
                oldiesByGender[gender].append(numPosts)
            oldiesByEmployment[employment].append(numPosts)

    resultsDir = FileSystem.getResultsDir()
    path = os.path.join(resultsDir, projectName, 'aggregatedAgeVsForumPosts.csv')
    pathMale = os.path.join(resultsDir, projectName, 'maleAgeVsForumPosts.csv')
    pathFemale = os.path.join(resultsDir, projectName, 'femaleAgeVsForumPosts.csv')
    summarizeBuckets(buckets, oldies, path, ageMin, ageMax, sizeBucket)
    summarizeBuckets(bucketsByGender['male'], oldiesByGender['male'],
                     pathMale, ageMin, ageMax, sizeBucket)
    summarizeBuckets(bucketsByGender['female'], oldiesByGender['female'],
                     pathFemale, ageMin, ageMax, sizeBucket)
    for key in employmentOptions:
        if len(key) > 0:
            path = os.path.join(resultsDir, projectName,
                                employmentOptions[key] + 'AgeVsForumPosts.csv')
            summarizeBuckets(bucketsByEmployment[employmentOptions[key]],
                             oldiesByEmployment[employmentOptions[key]],
                             path, ageMin, ageMax, sizeBucket)
def runner(self):
    """Unzip one course's Coursera activity log, extract view events into a
    .viewlog file, and re-zip the log. Exits early when no log exists or the
    output is already present.
    """
    logging.info('MakeForumViewLogs.runner(), ' + self.currCourseName)
    self.courseDatasetInfo = FileSystem.loadCourseDatasetInfo()
    try:
        if self.currCourseName not in self.courseDatasetInfo \
                or self.courseDatasetInfo[self.currCourseName] is None:
            print(self.currCourseName + ' has no activity log. Exiting...')
            sys.exit()
        print('Working on: ' + self.currCourseName + ' (' + self.progress() + ')')
        activityLogFileZipped = self.getActivityLogFile()
        # Drop the 3-character archive suffix (presumably '.gz' — confirm).
        activityLogFileUnzipped = activityLogFileZipped[:-3]
        outputDir = os.path.join(FileSystem.getDataDir(), 'ActivityLogsCoursera')
        outputPath = os.path.join(outputDir, self.currCourseName + '.viewlog')
        if os.path.exists(outputPath):
            logging.info('Output file already exists: ' + outputPath)
            sys.exit()
        if os.path.exists(activityLogFileZipped):
            self.unzip(activityLogFileZipped)
        if not os.path.exists(activityLogFileUnzipped):
            logging.info('Error finding file ' + activityLogFileUnzipped)
            sys.exit()
        views = self.getViews(activityLogFileUnzipped)
        self.writeViews(views, outputPath)
        self.zip(activityLogFileUnzipped)
    except CourseDBError:
        logging.info('\t\t+ ERROR (Connection does not exist), skipping...')
    except NoGradesError:
        logging.info('\t\t+ ERROR (CourseGrades does not exist), skipping...')
def getStats(self):
    """Print a tab-separated table of graph statistics — component count,
    average degree, edge count, and the three largest component sizes — for
    each similarity cutoff 0..10.
    """
    print('graph stats')
    distanceMatrix = FileSystem.loadDistanceMatrix('ast_1_3.sparse10.mat')
    submissionIdMap = FileSystem.loadSubmissionIdMap('ast_1_3')
    graph = self.createGraph(distanceMatrix)
    for i in range(11):
        filteredGraph = self.filterBySimilarity(graph, i)
        components = nx.connected_component_subgraphs(filteredGraph)
        componentSizes = self.getComponentSizes(components, submissionIdMap)
        numComponents = len(components)
        degree = self.getAverageDegree(filteredGraph)
        edges = nx.number_of_edges(filteredGraph)
        toPrint = [i, numComponents, degree, edges,
                   componentSizes[0], componentSizes[1], componentSizes[2]]
        # join() replaces the original quadratic string concatenation; the
        # trailing tab is kept to reproduce the original output exactly.
        print('\t'.join(str(elem) for elem in toPrint) + '\t')
def run(projectName):
    """Read one CSV per class from the project's results directory, stack the
    five category fractions into a stacked bar chart, and save it as
    distribution.pdf.
    """
    categories = [[], [], [], [], []]
    numClasses = 0
    classNames = []
    path = os.path.join(FileSystem.getResultsDir(), projectName)
    for fname in os.listdir(path):
        if fname[-4:] == '.csv':
            numClasses += 1
            classNames.append(fname[:-4])
            with open(os.path.join(path, fname)) as fid:
                rows = fid.readlines()
            # Second comma-separated column holds the category value.
            data = [float(r.strip().split(', ')[1]) for r in rows]
            for idx, value in enumerate(data):
                categories[idx].append(value)
    ind = range(numClasses)
    width = .5
    plt.figure(1)
    p0 = plt.bar(ind, categories[0], width, color='k')
    p1 = plt.bar(ind, categories[1], width, color='r', bottom=categories[0])
    p2 = plt.bar(ind, categories[2], width, color='g', bottom=listsum(categories, [0, 1]))
    p3 = plt.bar(ind, categories[3], width, color='b', bottom=listsum(categories, [0, 1, 2]))
    p4 = plt.bar(ind, categories[4], width, color='c', bottom=listsum(categories, [0, 1, 2, 3]))
    plt.xticks([x + width / 2. for x in ind], classNames, rotation='vertical')
    # plt.show()
    plt.ylim((0., 1.))
    plt.subplots_adjust(bottom=0.5)
    figpath = os.path.join(FileSystem.getResultsDir(), projectName, 'distribution.pdf')
    plt.savefig(figpath)
def run(projectName):
    """Compare thread lengths for threads with vs. without posts from top
    forum contributors, per course and averaged over all courses.
    """
    courseDatasets = FileSystem.loadCourseDatasetInfo()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    medianDiffs = []
    meanDiffs = []
    for course in courseDatasets:
        path = os.path.join(resultsDir, course.name + '_contribution.csv')
        try:
            with open(path) as fid:
                forumUserIds = [r.strip() for r in fid.readlines()]
        except IOError:
            # No contribution file for this course — skip it.
            continue
        topUserIds = getTopFivePercent(forumUserIds)
        DBSetup.switch(course)
        threads = ForumThreads.objects.all()
        posts = ForumPosts.objects.all()
        TC, nonTC = isolateThreadLengths(threads, posts, topUserIds)
        TCMedian = median(TC)
        nonTCMedian = median(nonTC)
        TCMean = mean(TC)
        nonTCMean = mean(nonTC)
        medianDiffs.append(TCMedian - nonTCMedian)
        meanDiffs.append(TCMean - nonTCMean)
        print(course.name)
        print('Median thread length for threads with posts by top contributors: ' + str(TCMedian))
        print('Median thread length for threads without posts by top contributors: ' + str(nonTCMedian))
        print('Mean thread length for threads with posts by top contributors: ' + str(TCMean))
        print('Mean thread length for threads without posts by top contributors: ' + str(nonTCMean))
        print(' ')
    print('Average difference between median thread lengths: ' + str(mean(medianDiffs)))
    print('Average difference between mean thread lengths: ' + str(mean(meanDiffs)))
def _setupCourseDirs(self):
    """Create (via FileSystem.createDir) the per-course results, data, and
    working directories for the current course."""
    courseName = self.getCourseName()
    self.currResultsDir = FileSystem.createDir(os.path.join(self.resultsDir, courseName))
    self.currDataDir = FileSystem.createDir(os.path.join(self.dataDir, courseName))
    self.currWorkingDir = FileSystem.createDir(os.path.join(self.workingDir, courseName))
def __init__(self, master):
    """Build the two-panel Tk UI: download controls on the left, a log
    listbox on the right."""
    self.master = master
    self.file_system = FileSystem(self)
    master.title("My Summer Tunes")
    self.status_label_text = StringVar()
    self.video_title_text = StringVar()
    self.location_label_text = StringVar()

    # Left panel
    left_panel = Frame(master)
    left_panel.grid(row=0, column=0)

    # URL row
    self.urlLabel = Label(left_panel, text="Youtube URL: ")
    self.urlLabel.grid(row=0, column=0)
    self.urlEntry = Entry(left_panel)
    self.urlEntry.grid(row=0, column=1)

    # Title row
    self.videoLabel = Label(left_panel, text="Video Title: ")
    self.videoLabel.grid(row=1, column=0)
    self.videoTitle = Label(left_panel, textvariable=self.video_title_text)
    self.videoTitle.grid(row=1, column=1)

    # Type row
    self.videoTypeLabel = Label(left_panel, text="Video Type: ")
    self.videoTypeLabel.grid(row=2, column=0)
    self.videoType = Label(left_panel)
    self.videoType.grid(row=2, column=1)

    # Download button + status
    self.downloadBtn = Button(left_panel, text="Download",
                              command=self.file_system.youtube_download)
    self.downloadBtn.grid(row=3, column=0)
    self.statusLabel = Label(left_panel, textvariable=self.status_label_text)
    self.statusLabel.grid(row=3, column=1)

    # Download-location button + current location
    self.setDownloadsBtn = Button(left_panel, text="Set Download Location",
                                  command=self.file_system.set_download_path)
    self.setDownloadsBtn.grid(row=4, column=0)
    self.locationLabel = Label(left_panel, textvariable=self.location_label_text)
    self.locationLabel.grid(row=4, column=1)
    self.set_ver_num = Label(left_panel, text='0.1.0')
    self.set_ver_num.grid(row=5, column=0)

    # Right panel
    right_panel = Frame(master)
    right_panel.grid(row=0, column=1)
    self.logList = Listbox(right_panel)
    self.logList.grid(row=0, column=0)

    # Initialize the location label from the stored download path.
    self.location_label_text.set(self.file_system.get_download_path())
# NOTE(review): collapsed/truncated source — the triple-quoted `intro` banner
# string opens at the end of this line but never closes in the visible file,
# so this definition cannot be safely reformatted; code left byte-identical.
# Sets up the P4wnP1 server: client-state monitor, input/output handler
# threads, transport/link layers, duck encoder, and a FileSystem helper.
def __init__(self, linklayer, transportlayer, config, stage2 = "", duckencoder = None): # state value to inform sub threads of running state self.running = False self.stage2=stage2 self.config = config self.client = Client() # object to monitor state of remote client self.client.registerCallbackOnConnectChange(self.onClientConnectStateChange) #self.control_sysinfo_response = BlockingQueue("CONTROL_SERVER_SYSINFO_RESPONSE") self.server_thread_in = Thread(target = self.__input_handler, name = "P4wnP1 Server Input Loop", args = ( )) self.server_thread_out = Thread(target = self.__output_handler, name = "P4wnP1 Server Output Loop", args = ( )) self._next_client_method_id = 1 self.tl = transportlayer self.ll = linklayer self.__pending_server_methods = {} self.duckencoder = duckencoder self.fs = FileSystem() # register Listener for LinkLayer signals to upper layers (to receive LinkLayer connection events) dispatcher.connect(self.signale_handle_transport_layer, sender="TransportLayerUp") self.setPrompt(False, False) cmd.Cmd.__init__(self) self.intro = '''=================================
def allocate():
    """Resolve the nvcs directories (local, target, base) from the current
    working directory's hidden repo layout.

    Exits with NOT_A_NVCS_DIRECTORY when the layout is missing; returns a
    (localDir, otherDir, baseDir) tuple of FileSystem objects otherwise.
    """
    global localDir
    global otherDir
    global baseDir
    global localPath
    localPath = getcwd()
    hiddenPath = path.join(localPath, hiddenDirName)
    # All three pieces of the repo layout must exist.
    if not path.isdir(hiddenPath):
        print(NOT_A_NVCS_DIRECTORY)
        sys.exit(1)
    if not path.isfile(path.join(hiddenPath, "config")):
        print(NOT_A_NVCS_DIRECTORY)
        sys.exit(1)
    if not path.isdir(path.join(hiddenPath, "base")):
        print(NOT_A_NVCS_DIRECTORY)
        sys.exit(1)
    localDir = FileSystem(localPath)
    # The config file names the target directory on a TARGET_DIR=... line.
    configFile = localDir.readFile(path.join(hiddenDirName, "config"))
    for line in configFile:
        if "TARGET_DIR" in line.content:
            otherDirPath = line.content.split("=")[1].replace("\n", "")
            otherDir = FileSystem(otherDirPath)
    baseDir = FileSystem(path.join(localPath, hiddenDirName, "base"))
    return (localDir, otherDir, baseDir)
def generate_setup_script(self, setup_script_name, aperture=None, exposure_control=None, shutter_speed=None, iso=None):
    """
    Function Description: Generates the setup script to set the aperture,
    exposure_control, shutter_speed, and iso of the camera if any of these
    values are passed.

    Author(s): Jacob Taylor Cassady
    """
    # Collect the settings keyed by the script's expected names.
    settings = {"aperture": aperture,
                "ec": exposure_control,
                "shutter": shutter_speed,
                "iso": iso}

    # Make sure the script directory exists before writing into it.
    FileSystem.enforce_path(self.script_location)

    # Write the XML command script wrapping the requested settings.
    with open(self.script_location + setup_script_name, "w+") as file:
        file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        file.write("<dccscript>\n")
        file.write(" " * 2 + "<commands>\n")
        self.write_settings(file, settings)
        file.write(" " * 2 + "</commands>\n")
        file.write("</dccscript>")
def initLocationsFile(self, locationsFile):
    """Load the YAML locations file and resolve the paths it references.

    Required keys (exit when absent): configFile, postProsDataRootFolder.
    Optional keys (silently skipped): trainingSimulationRootFolder,
    figureFolder, tableFolder.
    """
    assert (pathlib.Path(locationsFile).is_file())
    self.locationsFile = FileSystem.readYAML(locationsFile)
    try:
        self.configFile = FileSystem.readYAML(
            self.locationsFile["configFile"])
    except KeyError:
        sys.exit("configFile not given. Exiting.")
    if "trainingSimulationRootFolder" in self.locationsFile:
        self.trainingSimulationRootFolder = pathlib.Path(
            self.locationsFile["trainingSimulationRootFolder"])
    try:
        self.postProsDataRootFolder = pathlib.Path(
            self.locationsFile["postProsDataRootFolder"])
    except KeyError:
        sys.exit("postProsDataRootFolder not given. Exiting.")
    if "figureFolder" in self.locationsFile:
        self.figureFolder = pathlib.Path(self.locationsFile["figureFolder"])
    if "tableFolder" in self.locationsFile:
        self.tableFolder = pathlib.Path(self.locationsFile["tableFolder"])
def __init__(self, file_system: FileSystem, io_manager: IOManager, random: Random):
    """Store the collaborators and build a ClusterSwapper over the file
    system's FAT table and processor."""
    self._file_system = file_system
    self._io_manager = io_manager
    self._random = random
    self._cluster_swapper = ClusterSwapper(
        file_system.get_indexed_fat_table(),
        file_system.get_fat_processor(),
        io_manager)
def run(projectName):
    """Join the project's bound results with the course dataset info and
    write the full course list CSV."""
    boundDataDir = os.path.join(FileSystem.getResultsDir(), projectName)
    boundDataPath = os.path.join(boundDataDir, 'results.csv')
    outputPath = os.path.join(boundDataDir, 'fullCourseList.csv')
    courseData = FileSystem.loadCourseDatasetInfo()
    boundData = loadBoundData(boundDataPath)
    writeData(outputPath, courseData, boundData)
def run(projectName):
    """Combine bound results with course dataset info into fullCourseList.csv."""
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    inputPath = os.path.join(resultsDir, 'results.csv')
    outputPath = os.path.join(resultsDir, 'fullCourseList.csv')
    courseData = FileSystem.loadCourseDatasetInfo()
    boundData = loadBoundData(inputPath)
    writeData(outputPath, courseData, boundData)
def get(self):
    """Round-trip a test file through FileSystem and echo its contents in a
    plain-text response."""
    fileSystem = FileSystem()
    fileSystem.write("test-file-python.txt", "Lorem ipsum dol...")
    fileContents = fileSystem.read("test-file-python.txt")
    self.response.headers['Content-Type'] = 'text/plain'
    self.response.write('Hello World (Python)!\n')
    self.response.write(self.request.path + "\n")
    self.response.write(fileContents)
def __init__(self, file_system: FileSystem, io_manager: IOManager):
    """
    :param file_system: the file system currently backing the image
    :param io_manager: the IOManager currently backing the image
    """
    self._file_system = file_system
    self._io_manager = io_manager
    self._cluster_swapper = ClusterSwapper(
        file_system.get_indexed_fat_table(),
        file_system.get_fat_processor(),
        io_manager)
# NOTE(review): Python 2 code whose console-message literals (e.g. " Expected:")
# appear to have lost their original alignment whitespace in this collapsed
# source — reformatting could silently change output, so code left byte-identical.
# Purpose: report one finished test's outcome (expected vs. reported bug id),
# set the global bFailed flag on mismatch, and write the HTML bug report to a
# file whose name is sanitized from the BugId via FileSystem.fsValidName.
def fFinishedHandler(oTest, oBugReport): global bFailed, oOutputLock try: if not bFailed: oOutputLock and oOutputLock.acquire() oTest.bHasOutputLock = True if oTest.sExpectedBugTypeId: if not oBugReport: print "- Failed test: %s" % " ".join( [dsBinaries_by_sISA[oTest.sISA]] + oTest.asCommandLineArguments) print " Expected: %s" % oTest.sExpectedBugTypeId print " Got nothing" bFailed = True elif not oTest.sExpectedBugTypeId == oBugReport.sBugTypeId: print "- Failed test: %s" % " ".join( [dsBinaries_by_sISA[oTest.sISA]] + oTest.asCommandLineArguments) print " Expected: %s" % oTest.sExpectedBugTypeId print " Reported: %s @ %s" % ( oBugReport.sId, oBugReport.sBugLocation) print " %s" % ( oBugReport.sBugDescription) bFailed = True else: print "+ %s" % oTest elif oBugReport: print "- Failed test: %s" % " ".join( [dsBinaries_by_sISA[oTest.sISA]] + oTest.asCommandLineArguments) print " Expected no report" print " Reported: %s @ %s" % (oBugReport.sId, oBugReport.sBugLocation) print " %s" % (oBugReport.sBugDescription) bFailed = True else: print "+ %s" % oTest oOutputLock and oOutputLock.release() oTest.bHasOutputLock = False if oBugReport: # We'd like a report file name base on the BugId, but the later may contain characters that are not valid in a file name sDesiredReportFileName = "%s == %s @ %s.html" % ( " ".join(oTest.asCommandLineArguments), oBugReport.sId, oBugReport.sBugLocation) # Thus, we need to translate these characters to create a valid filename that looks very similar to the BugId sValidReportFileName = FileSystem.fsValidName( sDesiredReportFileName, bUnicode=False) FileSystem.fWriteDataToFile( oBugReport.sDetailsHTML, sReportsFolderName, sValidReportFileName, fbRetryOnFailure=lambda: False, ) finally: oTest.fFinished() oTest.bHandlingResult = False
def run():
    """Load the ForumMetrics results matrix, compute the masked correlation
    matrix across its columns, and write it to corrMat.csv."""
    projectName = 'ForumMetrics'
    resultsDir = FileSystem.getResultsDir()
    path = os.path.join(resultsDir, projectName, 'results.csv')
    outputPath = os.path.join(resultsDir, projectName, 'corrMat.csv')
    arr, nanmask = loadResults(path)
    # Mask NaN entries so they are excluded from the correlation.
    X = ma.array(arr, mask=nanmask)
    C = np.ma.corrcoef(np.transpose(X))
    writeCorrMat(C, outputPath)
def createGephi(self, cuttoff):
    """Build the similarity graph for the ast_1_1 matrix and export it as GML
    for Gephi.

    :param cuttoff: only used to name the output file here — the graph itself
        is not filtered in this method.
    """
    # print() call form works on both Python 2 and 3 for a single argument.
    print('createGephi')
    matrixName = 'ast_1_1'
    fileName = 'ast_1_1.txt'
    matrix = FileSystem.loadDistanceMatrix(fileName)
    submissionIdMap = FileSystem.loadSubmissionIdMap('ast_1_1')
    graph = self.createGraph(matrix, submissionIdMap)
    outPath = self.getOutFilePath(matrixName + '_' + str(cuttoff))
    print('write gephi: ' + outPath)
    nx.write_gml(graph, outPath)
def run(projectName):
    """Tally forum post positions of top contributors vs. everyone else for
    every course, write per-course and aggregated histograms to a text
    report, and dump normalized continuous position histograms to CSV."""
    courseDatasets = FileSystem.loadCourseDatasetInfo()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    outputPath = os.path.join(resultsDir, 'topContributorPositions.txt')
    cumulativeResultsTC = {}
    cumulativeResultsNonTC = {}
    cumulativeContHistTC = NUMBINS * [0]
    cumulativeContHistNonTC = NUMBINS * [0]
    ofid = open(outputPath, 'wt')
    for course in courseDatasets:
        print(course.name)
        path = os.path.join(resultsDir, course.name + '_contribution.csv')
        try:
            with open(path) as fid:
                forumUserIds = [r.strip() for r in fid.readlines()]
        except IOError:
            # No contribution file for this course — skip it.
            continue
        topUserIds = getTopFivePercent(forumUserIds)
        DBSetup.switch(course)
        forumData = CourseForums()
        resultsTC, resultsNonTC, continuousHistTC, continuousHistNonTC = \
            tallyPositions(forumData, topUserIds)
        # Accumulate this course into the cross-course aggregates.
        cumulativeResultsTC = addResultsDict(cumulativeResultsTC, resultsTC)
        cumulativeResultsNonTC = addResultsDict(cumulativeResultsNonTC, resultsNonTC)
        cumulativeContHistTC = addResultsList(cumulativeContHistTC, continuousHistTC)
        cumulativeContHistNonTC = addResultsList(cumulativeContHistNonTC, continuousHistNonTC)
        ofid.write('--------------------------------------------\n')
        ofid.write('Course: ' + course.name + '\n')
        ofid.write('Top contributor post position histogram\n')
        summarization(ofid, resultsTC, 10)
        ofid.write('\n\n')
        ofid.write('Non top contributor post position histogram\n')
        summarization(ofid, resultsNonTC, 10)
    ofid.write('**************************************\n')
    ofid.write('Aggregated over courses:\n')
    ofid.write('Top contributor post position histogram\n')
    summarization(ofid, cumulativeResultsTC, 20)
    ofid.write('\n\n')
    ofid.write('Non top contributor post position histogram\n')
    summarization(ofid, cumulativeResultsNonTC, 20)
    ofid.close()
    normalizedCumulativeContHistTC = normalize(cumulativeContHistTC)
    normalizedCumulativeContHistNonTC = normalize(cumulativeContHistNonTC)
    outputPathTC = os.path.join(resultsDir, 'normalizedPositionHistTC.csv')
    with open(outputPathTC, 'wt') as ofid:
        for i in range(NUMBINS):
            ofid.write(str(i) + ', ' + str(normalizedCumulativeContHistTC[i]) + '\n')
    outputPathNonTC = os.path.join(resultsDir, 'normalizedPositionHistNonTC.csv')
    with open(outputPathNonTC, 'wt') as ofid:
        for i in range(NUMBINS):
            ofid.write(str(i) + ', ' + str(normalizedCumulativeContHistNonTC[i]) + '\n')
def run():
    """Compute the masked column-wise correlation matrix of the ForumMetrics
    results and write it to corrMat.csv."""
    projectName = 'ForumMetrics'
    baseDir = FileSystem.getResultsDir()
    path = os.path.join(baseDir, projectName, 'results.csv')
    outputPath = os.path.join(baseDir, projectName, 'corrMat.csv')
    arr, nanmask = loadResults(path)
    masked = ma.array(arr, mask=nanmask)
    C = np.ma.corrcoef(np.transpose(masked))
    writeCorrMat(C, outputPath)
def run_script(self, script_name):
    """
    Function Description: Runs the passed script within the script location.

    Author(s): Jacob Taylor Cassady
    """
    # Make sure the script directory exists.
    FileSystem.enforce_path(self.script_location)
    # Invoke the control executable on the named script.
    os.system(self.control_cmd_location + " " + self.script_location + script_name)
def command_camera(self, command):
    """
    Function Description: Creates a call to the camera using DigiCamControl.

    Author(s): Jacob Taylor Cassady
    """
    # Make sure the save folder exists.
    FileSystem.enforce_path(self.save_folder)
    # Target image name: <collection>_<index><extension>.
    image_name = self.collection_name + "_" + str(self.image_index) + self.image_type
    # Invoke DigiCamControl with the output filename and the camera command.
    os.system(self.control_cmd_location + " /filename " + self.save_folder + image_name + " " + command)
def fauStartEdgeAndReturnProcessIds(sURL = None, bDeleteRecoveryData = True):
    """Launch Edge through EdgeDbg (suspended) and return the process ids it
    reports. Launching does not always work as expected, so retry up to 60
    times before giving up."""
    for x in xrange(60):
        if bDeleteRecoveryData:
            fDeleteRecoveryData()
        sEdgDbgBinaryPath = dxEdgeDbgConfig["sEdgeDbgBinaryPath_%s" % sOSISA]
        assert sEdgDbgBinaryPath and FileSystem.fbIsFile(sEdgDbgBinaryPath), \
            "No %s EdgDbg binary found at %s" % (sOSISA, sEdgDbgBinaryPath)
        asEdgeDbgCommand = [sEdgDbgBinaryPath] + (sURL is not None and [sURL] or []) + ["--suspend"]
        oEdgeDbgProcess = subprocess.Popen(asEdgeDbgCommand,
                                           stdout = subprocess.PIPE,
                                           stderr = subprocess.PIPE)
        (sStdOut, sStdErr) = oEdgeDbgProcess.communicate()
        oEdgeDbgProcess.stdout.close()
        oEdgeDbgProcess.stderr.close()
        oEdgeDbgProcess.wait()
        assert not sStdErr, "Error running EdgeDbg:\r\n%s" % sStdErr
        auProcessIds = []
        asErrors = []
        for sLine in sStdOut.split("\n"):
            # Each line is either "+ <name> process id = <pid>" or "- <error>".
            oProcessIdOrErrorMatch = re.match(r"^(?:%s)\r*$" % "|".join([
                r"\+ (?:.+) process id = (\d+)",
                r"\- (.+)",
            ]), sLine)
            if oProcessIdOrErrorMatch:
                sProcessId, sError = oProcessIdOrErrorMatch.groups()
                if sError:
                    asErrors.append(sError)
                else:
                    auProcessIds.append(long(sProcessId))
        if len(asErrors) == 0:
            assert len(auProcessIds) > 0, \
                "Could not detect process id's in EdgeDbg output:\r\n%s" % sStdOut
            return auProcessIds
        # Errors reported — wait a moment and retry.
        time.sleep(1)
    raise AssertionError("Error starting Edge:\r\n%s" % sStdOut)
def mergeCorrelationResults(projectName):
    """Collect per-course regression results and course stats, skipping
    courses with missing files, and write the merged CSV."""
    courseList = FileSystem.loadCourseList()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    results = []
    for course in courseList:
        currDir = os.path.join(resultsDir, course)
        path = os.path.join(currDir, 'ForumActivityVsQuizScore_regression.csv')
        pathStats = os.path.join(currDir, 'CourseStats.csv')
        try:
            currResults = loadRegressionResults(path)
            currCourseStats = loadCourseStats(pathStats)
        except IOError:
            # Either file missing for this course — skip it.
            continue
        results.append((course, currResults, currCourseStats))
    outputPath = os.path.join(resultsDir, 'mergedCorrelationResults.csv')
    writeMergedCorrelationResults(results, outputPath)
def mergeCorrelationResults(projectName):
    """Merge each course's regression results with its stats into one CSV,
    ignoring courses whose input files are absent."""
    courseList = FileSystem.loadCourseList()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    results = []
    for course in courseList:
        courseDir = os.path.join(resultsDir, course)
        regressionPath = os.path.join(courseDir, 'ForumActivityVsQuizScore_regression.csv')
        statsPath = os.path.join(courseDir, 'CourseStats.csv')
        try:
            regression = loadRegressionResults(regressionPath)
            stats = loadCourseStats(statsPath)
        except IOError:
            continue
        results.append((course, regression, stats))
    writeMergedCorrelationResults(
        results, os.path.join(resultsDir, 'mergedCorrelationResults.csv'))
def __init__(self):
    """Locate the PerUserPosting results file and load the posters map."""
    self.projectName = "PerUserPosting"
    self.resultsDir = os.path.join(FileSystem.getResultsDir(), self.projectName)
    self.path = os.path.join(self.resultsDir, "results.csv")
    self.posters = {}
    self._loadPosters()
def testProblem(hwId, partId):
    """Run the unit tester over every stored Octave submission for one
    homework part, logging a per-submission report."""
    print('Unit testing homework ' + str(hwId) + ', part ' + str(partId))
    logFile = FileSystem.getLogDir() + '/octave_unittesting/log_' + str(hwId) + '_' + str(partId)
    logging.basicConfig(filename=logFile,
                        format='%(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=logging.DEBUG)
    print('Loading unit testing code')
    tester = UnitTester(hwId, partId)
    print('Loading submissions')
    Submissions = Octave.objects.filter(homework_id=hwId, part_id=partId)
    for i, submission in enumerate(Submissions):
        # Run unit tests for submission i.
        print('Running submission ' + str(i) + ' of ' + str(len(Submissions)))
        tester.refreshWorkingDir()
        tester.loadCode(submission.code)
        output, correct = tester.run()
        # Record the result on the submission object (DB save intentionally disabled).
        submission.output = output
        submission.correct = correct
        ######submission.save()
        logging.debug(report(hwId, partId, i, len(Submissions), correct, submission.id))
def save(self, path):
    """Serialize the document DOM to UTF-8 XML and write it to *path*.

    Returns whatever FileSystem.filePutContents returns (presumably a
    success flag — confirm against its implementation).
    """
    root = self.documentXmlDom.documentElement
    # Empty indent/newl yields compact single-line output.
    xml = root.toprettyxml(encoding='UTF-8', indent="", newl="")
    # xml = root.toxml( encoding='UTF-8')
    result = FileSystem.filePutContents(path, xml)
    # Unreachable `pass` after the return removed.
    return result
def __init__(self):
    """Load every forum table into memory and build the cross-reference maps.

    View-log data is best-effort: some courses have no ForumViewLog table,
    so those steps are allowed to fail silently.
    """
    self.forumTypes = list(FileSystem.loadForumTypes())
    self.forums = list(ForumForums.objects.all())
    self.threads = list(ForumThreads.objects.all())
    self.posts = list(ForumPosts.objects.all())
    self.comments = list(ForumComments.objects.all())
    self.reputations = list(ForumReputationPoints.objects.all())
    try:
        self.views = list(ForumViewLog.objects.all())
    except Exception:
        # Narrowed from a bare except: no longer swallows SystemExit /
        # KeyboardInterrupt. Deliberate best-effort: view logs are optional.
        pass
    self._buildForumIndex()
    self._getForumParentMap()
    self._getForumTypeMap()
    self._getThreadToForumMap()
    self._getThreadToPostMap()
    self._getPostToThreadMap()
    self._getPostToForumMap()
    self._getCommentToPostMap()
    self._getCommentToThreadMap()
    self._getCommentToForumMap()
    try:
        self._getViewToThreadMap()
    except Exception:
        # Best-effort for the same reason as self.views above.
        pass
def __init__(self, fileName = "", defaultFileExtension = ""):
    """Wire up the file-system helpers and immediately load *fileName*."""
    self._settings = None
    self.fileSystem = FileSystem()
    self._fileComponents = MirroredDirectory()
    self.importer = Importer()
    self._templateDir = None
    self.set(fileName, defaultFileExtension)
def open(self):
    """Read the RDB, its partition list, and its filesystem list from disk.

    Populates self.parts, self.fs, self.used_blks, and self.max_blks.
    Returns True on success; on any read error sets self.valid = False and
    returns False.
    """
    # Read the Rigid Disk Block itself.
    self.rdb = RDBlock(self.rawblk)
    if not self.rdb.read():
        self.valid = False
        return False
    # Track every block occupied by RDB structures.
    self.used_blks = [self.rdb.blk_num]
    # Walk the linked list of partition blocks.
    self.parts = []
    part_blk = self.rdb.part_list
    num = 0
    while part_blk != Block.no_blk:
        p = Partition(self.rawblk, part_blk, num, self.rdb.log_drv.cyl_blks, self)
        num += 1
        if not p.read():
            self.valid = False
            return False
        self.parts.append(p)
        # store used block
        self.used_blks.append(p.get_blk_num())
        # next partition
        part_blk = p.get_next_partition_blk()
    # Walk the linked list of filesystem blocks.
    self.fs = []
    fs_blk = self.rdb.fs_list
    num = 0
    while fs_blk != PartitionBlock.no_blk:
        fs = FileSystem(self.rawblk, fs_blk, num)
        num += 1
        if not fs.read():
            self.valid = False
            return False
        self.fs.append(fs)
        # store used blocks
        self.used_blks += fs.get_blk_nums()
        # next filesystem entry
        fs_blk = fs.get_next_fs_blk()
    # TODO: add bad block blocks
    self.valid = True
    self.max_blks = self.rdb.log_drv.rdb_blk_hi + 1
    return True
def __init__(self):
    """Resolve the PerUserPosting results path and populate the posters map."""
    self.projectName = 'PerUserPosting'
    self.resultsDir = os.path.join(FileSystem.getResultsDir(), self.projectName)
    self.path = os.path.join(self.resultsDir, 'results.csv')
    self.posters = {}
    self._loadPosters()
def log_model(model, accuracy, loss, model_type, model_alias = "test_model"):
    """Persist a Keras model and its evaluation metrics.

    Writes models/<model_type>/<model_alias>.json (architecture),
    .h5 (weights), and _evaluation.txt (loss then accuracy).

    :param model: trained model exposing to_json()/save_weights()/summary()
    :param accuracy: evaluation accuracy to log
    :param loss: evaluation loss to log
    :param model_type: subdirectory grouping models of one architecture
    :param model_alias: base filename for all saved artifacts
    """
    # Build the artifact base path once instead of re-concatenating
    # os.getcwd() + os.path.sep + ... for every file.
    base_path = os.path.join(os.getcwd(), "models", model_type, model_alias)

    # Save the model architecture to JSON.
    json_model = model.to_json()
    with open(base_path + ".json", "w") as json_file:
        json_file.write(json_model)

    # Save weights.
    model.save_weights(base_path + ".h5")
    print("Saved model " + model_alias + " to disk")

    # Save loss and accuracy.
    evaluation_path = base_path + "_evaluation.txt"
    FileSystem.start_log(str(loss), evaluation_path)
    FileSystem.log(str(accuracy), evaluation_path)

    # Print model summary to console.
    print(model.summary())
    # plot_model(self.model, to_file= os.getcwd() + os.path.sep + "models" + os.path.sep + "ResNet50" + os.path.sep + model + ".png", show_shapes=True, show_layer_names=True)
def graphConnectedComponentsVsCutoff(self):
    """Print the connected-component count of the similarity graph for each
    cutoff, from 11 down to 0 (tab-separated "cutoff<TAB>count" lines).
    """
    # print() call form works on both Python 2 and 3 for a single argument.
    print('graph connected components')
    distanceMatrix = FileSystem.loadDistanceMatrix(FULL_MATRIX)
    graph = self.createGraph(distanceMatrix)
    for i in range(11, -1, -1):
        filteredGraph = self.filterBySimilarity(graph, i)
        components = nx.number_connected_components(filteredGraph)
        print(str(i) + '\t' + str(components))
class UserSettingsSetter():
    """Collects user settings through successive Sublime input panels and
    stores the non-empty responses via UserSettings.
    """

    def __init__(self, window, fileName, userInput, userInputCaption):
        self.window = window
        self.fileSystem = FileSystem()
        self.fileName = fileName
        # userInput holds the setting keys; userInputCaption holds the
        # parallel list of prompt captions.
        self.userInput = userInput
        self.userInputCaption = userInputCaption
        self.callback = None
        self.setUserSettings()

    def setCallbackWhenFinished(self, callback):
        """Register a callback invoked once all settings are captured."""
        self.callback = callback

    def setUserSettings(self):
        """Start the input sequence when the settings file exists."""
        self.userSettings = UserSettings(self.fileName, self.fileSystem)
        userSettingsExist = self.fileSystem.isfile(self.fileName)
        self.userInputResponse = []
        self.currentInput = 0
        if userSettingsExist == True:
            self.captureInput(self.userInputCaption[self.currentInput], "")
        else:
            print("Error trying to write to " + self.fileName)

    def captureInput(self, caption, initial):
        """Show one input panel prompting for the current setting."""
        if self.currentInput != None:
            self.inputPanelView = self.window.show_input_panel(
                caption, initial,
                self.settingEntered,
                self.settingEnteringChange,
                self.settingEnteringAbort)
            self.inputPanelView.set_name("InputPanel")
            self.inputPanelView.settings().set("caret_style", "solid")
        # Redundant trailing `pass` from the original removed.

    def settingEntered(self, command_string):
        """Store one response and advance; after the last, persist and finish."""
        self.userInputResponse.append(command_string)
        self.currentInput += 1
        if self.currentInput < len(self.userInput):
            self.captureInput(self.userInputCaption[self.currentInput], "")
        else:
            for x in range(0, len(self.userInput)):
                # Only store non-empty responses.
                if len(self.userInputResponse[x]) > 0:
                    self.userSettings.set(self.userInput[x], self.userInputResponse[x])
            view = self.window.active_view()
            view.erase_status("InputPanel")
            if self.callback is not None:
                sublime.set_timeout(lambda: self.callback(), 100)

    def settingEnteringChange(self, command_string):
        """Mirror the in-progress input into the view's status bar."""
        view = self.window.active_view()
        view.set_status("InputPanel", command_string)

    def settingEnteringAbort(self):
        """On abort, wipe all settings and clear the status bar."""
        self.userSettings.deleteAll()
        view = self.window.active_view()
        view.erase_status("InputPanel")
def add_filesystem(self, data, dos_type=DosType.DOS1, version=0, dev_flags=None):
    """Append a filesystem entry to the RDB, allocating the blocks it needs.

    Returns True on success, False when the RDB has no free blocks left.
    """
    # create a file system
    blk_num = self._next_rdb_block()
    fs_num = len(self.fs)
    fs = FileSystem(self.rawblk, blk_num, fs_num)
    # get total number of blocks for fs data
    num_blks = fs.get_total_blocks(data)
    # check if RDB has space left
    if not self._has_free_rdb_blocks(num_blks):
        return False
    # allocate blocks and record the new high-water mark
    blks = self._alloc_rdb_blocks(num_blks)
    self.used_blks += blks
    self._update_hi_blk()
    # create and write the file system blocks
    fs.create(blks[1:], data, version, dos_type, dev_flags)
    fs.write()
    # link the new fs block: from the RDB when it is the first entry,
    # otherwise from the previous entry's header
    if len(self.fs) == 0:
        self.rdb.fs_list = blk_num
    else:
        last_fs = self.fs[-1]
        last_fs.fshd.next = blk_num
        last_fs.write(only_fshd=True)
    # update rdb: allocated blocks and optional link
    self.rdb.write()
    # add fs to list
    self.fs.append(fs)
    return True
def combineResults(projectName):
    """Pool per-course forum-activity results, then write combined scatter,
    regression, and histogram outputs into the project results directory.
    """
    courseList = FileSystem.loadCourseList()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    forumActivities = []    # paired row-for-row with finalGrades
    finalGrades = []
    forumActivities2 = []   # paired row-for-row with lecturesViewed
    lecturesViewed = []
    for course in courseList:
        currDir = os.path.join(resultsDir, course)
        pathScore = os.path.join(currDir, 'allForumVsFinalScore.csv')
        try:
            forumActivity, finalGrade = loadResults(pathScore)
            forumActivities += forumActivity
            finalGrades += finalGrade
        except IOError:
            continue
        pathLectures = os.path.join(currDir, 'allForumVsLecturesViewed.csv')
        try:
            forumActivity, numLectures = loadResults(pathLectures)
            forumActivities2 += forumActivity
            lecturesViewed += numLectures
        except IOError:
            continue
    outputPathScore = os.path.join(resultsDir, 'allForumVsFinalScore.csv')
    regressOutputPathScore = os.path.join(
        resultsDir, 'allForumVsFinalScore_regression.csv')
    outputPathLectures = os.path.join(resultsDir, 'allForumVsLectures.csv')
    regressOutputPathLectures = os.path.join(
        resultsDir, 'allForumVsLectures_regression.csv')
    forumActivitiesHistPath = os.path.join(resultsDir, 'allForum_hist.csv')
    finalGradesHistPath = os.path.join(resultsDir, 'finalScore_hist.csv')
    lecturesHistPath = os.path.join(resultsDir, 'lecturesViewed_hist.csv')
    writeResults(forumActivities, finalGrades, outputPathScore)
    writeRegressionResults(forumActivities, finalGrades, regressOutputPathScore)
    # BUG FIX: lecturesViewed accumulates in lockstep with forumActivities2,
    # not forumActivities (a course can contribute to one pair but not the
    # other); the regression call below already used forumActivities2.
    writeResults(forumActivities2, lecturesViewed, outputPathLectures)
    writeRegressionResults(forumActivities2, lecturesViewed,
                           regressOutputPathLectures)
    writeHistogram(forumActivities, forumActivitiesHistPath, limits=(-3.0, 3.0))
    writeHistogram(finalGrades, finalGradesHistPath, limits=(-3.0, 3.0))
    writeHistogram(lecturesViewed, lecturesHistPath, limits=(-3.0, 3.0))
def __setupCollection(self, folder, collectionVar):
    """Resolve a collection specification into a concrete collection.

    A str is treated as a YAML file name (made absolute against *folder*
    when one is given), a list is used as-is, a dict is expanded into a
    simulation color collection, and any other type yields None.
    """
    if isinstance(collectionVar, str):
        if folder is None:
            yamlPath = collectionVar
        else:
            yamlPath = FileSystem.getAbsoluteFilename(folder, collectionVar)
        return FileSystem.readYAML(yamlPath)
    if isinstance(collectionVar, list):
        return collectionVar
    if isinstance(collectionVar, dict):
        return InputSimulation.__createSimulationColorCollection(
            self.labelCollection, collectionVar)
    return None
def againstMultiple(assn, sourceAST, targetIds):
    """Match *sourceAST* against the submissions in *targetIds* for
    assignment *assn* using the assignment's starter-code keyword file.

    Returns whatever Matching._dbMatcher produces for the match.
    """
    matcher = Matching()
    matcher.assn = assn
    matcher.timeOut = 5
    # keyword file lives next to the other match keyword files, named after
    # the assignment, e.g. starter_<assn>.txt
    matcher.keywordFile = os.path.join(FileSystem.getMatchKeywordPath(),
                                       'starter_' + str(assn) + '.txt')
    matcher.sourceAST = sourceAST
    matchResult = Matching._dbMatcher(matcher.assn,
                                      sourceAST, targetIds,
                                      matcher.keywordFile)
    # Bug fix: the result was computed but never returned, so callers
    # always received None.
    return matchResult
def __init__(self, window, fileName, userInput, userInputCaption):
    """Store the dialog collaborators and load the user settings.

    window           -- owning window/widget (semantics defined by caller)
    fileName         -- settings file name passed to setUserSettings()
                        machinery (presumably a path — TODO confirm)
    userInput        -- input widget or value captured from the user
    userInputCaption -- caption/label associated with userInput
    """
    self.window = window
    self.fileSystem = FileSystem()
    self.fileName = fileName
    self.userInput = userInput
    self.userInputCaption = userInputCaption
    # no callback until a caller registers one
    self.callback = None
    # must run last: reads the attributes assigned above
    self.setUserSettings()
def save_model(self, model = 'best_model'):
    """Persist the current Keras model to the project's ``models`` directory.

    Writes four artifacts named after *model*: the architecture as JSON,
    the weights as HDF5, a text file with evaluation loss/accuracy, and a
    PNG of the layer graph; also prints the summary to the console.
    """
    # Idiom fix: build the paths once with os.path.join instead of repeating
    # ".." + os.path.sep + ... concatenations for every artifact.
    relative_base = os.path.join("..", "..", "models", model)
    absolute_base = os.path.join(os.getcwd(), "..", "..", "models", model)
    # Save the model to JSON
    json_model = self.model.to_json()
    with open(relative_base + ".json", "w") as json_file:
        json_file.write(json_model)
    # Save weights
    self.model.save_weights(relative_base + ".h5")
    print("Saved model " + model + " to disk")
    # Save loss and accuracy
    loss, accuracy = self.evaluate_model()
    evaluation_path = absolute_base + "_evaluation.txt"
    FileSystem.start_log(str(loss), evaluation_path)
    FileSystem.log(str(accuracy), evaluation_path)
    # Save graphical model summary and print summary to console.
    print(self.model.summary())
    plot_model(self.model,
               to_file=absolute_base + ".png",
               show_shapes=True,
               show_layer_names=True)
def main(argv):
    """Entry point for the dark-frame pipeline: explode the input video,
    run the dark-generation pass over its frames, then clean up.

    Expects exactly one argument (the input video path); returns 0 on
    success, 1 on bad usage.
    """
    if len(argv) != 2:
        print('format: dark.py <input>')
        return 1
    fs = FileSystem()
    fs.initDirs()
    Logger.write('start explode video')
    processor = DarkGenerationVideoProcessor()
    processor.splitVideo(argv[1], config.DEFAULT_IMGS_DIR)
    processor.work(AbstractImageProcessor(), [])
    Logger.write('end of work. created dark.png')
    fs.deleteDirs()
    return 0
def run(projectName):
    """Write resultsSorted.csv: the rows of results.csv ordered by their
    second column (contribution count) descending.

    Input rows are ', '-separated with the course name in column 0 and a
    numeric contribution value in column 1; the original row text is
    preserved verbatim in the output.
    """
    resultsDir = FileSystem.getResultsDir()
    path = os.path.join(resultsDir, projectName, 'results.csv')
    outPath = os.path.join(resultsDir, projectName, 'resultsSorted.csv')
    strings = {}
    numContributions = {}
    with open(path) as fid:
        # stream the file instead of materializing readlines()
        for r in fid:
            row = r.strip().split(', ')
            courseName = row[0]
            strings[courseName] = r
            numContributions[courseName] = float(row[1])
    # Portability fix: dict.iteritems() is Python-2-only; items() behaves
    # identically here and also works on Python 3.
    sortedCourseNames = sorted(numContributions.items(),
                               key=itemgetter(1), reverse=True)
    with open(outPath, 'wt') as fid:
        for courseName, _ in sortedCourseNames:
            fid.write(strings[courseName])
def openMenu(self, position):
    """Show the file context menu (rename / delete / copy) at *position*
    and dispatch the chosen action.
    """
    menu = QtWidgets.QMenu()
    rename_file_action = menu.addAction("Rename File")
    delete_file_action = menu.addAction("Delete File")
    copy_file_action = menu.addAction("Copy File")
    action = menu.exec_(self.mapToGlobal(position))
    if action == rename_file_action:
        self.file_label.hide()
        self.file_line_edit.show()
        # Bug fix: connect() was called on every menu invocation, stacking
        # duplicate connections so rename_file fired once per past menu use.
        # Disconnect first; Qt raises TypeError when no connection exists.
        try:
            self.file_line_edit.returnPressed.disconnect(self.rename_file)
        except TypeError:
            pass  # not connected yet
        self.file_line_edit.returnPressed.connect(self.rename_file)
    if action == delete_file_action:
        self.hide()
        FileSystem.delete_file(self, self.file_label.text())
    if action == copy_file_action:
        FileSystem.copy_file(self, self.file_label.text())
        self.copy_file_event.emit(self.file_label.text() + " - Copy" + os.path.splitext(self.file_label.text())[1])
    self.raise_()
def load():
    """Load every configured database plus the default one.

    Returns a dict mapping database name -> database handle; names starting
    with 'activity_' are opened as event databases, all others as course
    data databases. Key 'default' holds the default database.
    """
    dbNames = FileSystem.loadDBList()
    databases = {}
    databases['default'] = DBLoader.getDefaultDB()
    for dbName in dbNames:
        # Idiom fix: startswith() replaces the fragile hand-counted slice
        # comparison ('activity_' == dbName[:9]) whose 9 had to match the
        # prefix length exactly.
        if dbName.startswith('activity_'):
            databases[dbName] = DBLoader.getEventDB(dbName)
        else:
            databases[dbName] = DBLoader.getCourseDataDB(dbName)
    return databases
def run(projectName):
    """Print the distinct course names found in the project's results.csv
    (first ', '-separated field of each row; order is unspecified because
    the names pass through a set).
    """
    path = os.path.join(FileSystem.getResultsDir(), projectName, 'results.csv')
    # Robustness fix: 'with' guarantees the handle is closed even when
    # readlines() raises; the original open/close pair leaked on error.
    with open(path) as fid:
        rows = fid.readlines()
    courseNames = list(set(r.strip().split(', ')[0] for r in rows))
    for course in courseNames:
        print(course)
def mergeCorrelationResults(projectName):
    """Collect each course's score and lectures regression CSVs and write a
    single merged correlation file into the project's results directory.

    Courses missing the score file are skipped entirely — the lectures file
    is not even attempted for them (original control flow preserved).
    """
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    resultsScore = []
    resultsLectures = []
    for course in FileSystem.loadCourseList():
        courseDir = os.path.join(resultsDir, course)
        scorePath = os.path.join(
            courseDir, 'allForumVsFinalScore_regression.csv')
        lecturesPath = os.path.join(
            courseDir, 'allForumVsLecturesViewed_regression.csv')
        try:
            resultsScore.append((course, loadRegressionResults(scorePath)))
        except IOError:
            continue  # no score file: skip this course completely
        try:
            resultsLectures.append((course, loadRegressionResults(lecturesPath)))
        except IOError:
            continue
    outputPath = os.path.join(resultsDir, 'mergedCorrelationResults.csv')
    writeMergedCorrelationResults(resultsScore, resultsLectures, outputPath)
class UserSettings():
    """A dict of user settings persisted as JSON through a FileSystem
    abstraction.

    The backing file is created (as an empty JSON object) on first use and
    rewritten in full on every set().
    """

    def __init__(self, fileName=None, fileSystem=None):
        """Bind to *fileSystem* (default: a fresh FileSystem()) and, when
        *fileName* is given, load or create that settings file.
        """
        # Bug fix: the injected fileSystem was ignored (a new FileSystem()
        # was created whenever one was passed) and nothing was assigned at
        # all when fileSystem was None, leaving the attribute undefined.
        self.fileSystem = fileSystem if fileSystem is not None else FileSystem()
        self._settingsVariables = dict()
        if fileName is not None:
            self.setFile(fileName)

    def set(self, variable, value):
        """Set *variable* to *value* and rewrite the backing file.

        Returns the result of fileSystem.replaceFile().
        """
        self._settingsVariables[variable] = value
        settingsNewContent = json.dumps(self._settingsVariables)
        return self.fileSystem.replaceFile(self.fileName, settingsNewContent)

    def get(self, variableName):
        """Return the stored value; raises KeyError for unknown names."""
        return self._settingsVariables[variableName]

    def deleteAll(self):
        """Remove the backing settings file."""
        self.fileSystem.remove(self.fileName)

    def setFile(self, fileName):
        """Bind to *fileName*, creating an empty JSON file if it does not
        exist, then load its contents into memory.
        """
        self.fileName = fileName
        if not self.fileSystem.isfile(fileName):
            settingsContent = "{\n}"
            self.fileSystem.createFile(fileName, settingsContent)
        else:
            settingsContent = self.fileSystem.getFileContent(self.fileName)
        self._settingsVariables = json.loads(settingsContent)
def add_filesystem(self, data, dos_type=DosType.DOS1, version=0):
    """Append a filesystem driver to this RDB's filesystem list.

    data     -- raw filesystem driver image (size measured via
                fs.get_total_blocks(data); presumably bytes — TODO confirm)
    dos_type -- DOS type tag recorded for the filesystem (default DosType.DOS1)
    version  -- version number recorded in the filesystem header

    Returns True on success, or False when the RDB has no free blocks
    left to hold the filesystem data.
    """
    # create a file system
    blk_num = self._next_rdb_block()
    fs_num = len(self.fs)
    fs = FileSystem(self.rawblk, blk_num, fs_num)
    # get total number of blocks for fs data
    num_blks = fs.get_total_blocks(data)
    # check if RDB has space left
    if not self._has_free_rdb_blocks(num_blks):
        return False
    # allocate blocks
    blks = self._alloc_rdb_blocks(num_blks)
    self.used_blks += blks
    self._update_hi_blk()
    # create file system
    # NOTE: first allocated block (blks[0]) holds the FSHD itself; the data
    # blocks start at blks[1:].
    fs.create(blks[1:], data, version, dos_type)
    fs.write()
    # link fs block
    if len(self.fs) == 0:
        # write into RDB
        self.rdb.fs_list = blk_num
    else:
        # write into last fs block
        last_fs = self.fs[-1]
        last_fs.fshd.next = blk_num
        last_fs.write(only_fshd=True)
    # update rdb: allocated blocks and optional link
    self.rdb.write()
    # add fs to list
    self.fs.append(fs)
    return True
def combineResults(projectName):
    """Pool per-course forum/outcome pairs across all courses and write the
    aggregated scatter, regression and histogram CSVs into the project's
    results directory.

    Courses whose score CSV is missing are skipped entirely (a missing score
    file also skips that course's lectures file — original control flow).
    """
    courseList = FileSystem.loadCourseList()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    forumActivities = []    # paired element-wise with finalGrades
    finalGrades = []
    forumActivities2 = []   # paired element-wise with lecturesViewed
    lecturesViewed = []
    for course in courseList:
        currDir = os.path.join(resultsDir, course)
        pathScore = os.path.join(currDir, 'allForumVsFinalScore.csv')
        try:
            forumActivity, finalGrade = loadResults(pathScore)
            forumActivities += forumActivity
            finalGrades += finalGrade
        except IOError:
            continue
        pathLectures = os.path.join(currDir, 'allForumVsLecturesViewed.csv')
        try:
            forumActivity, numLectures = loadResults(pathLectures)
            forumActivities2 += forumActivity
            lecturesViewed += numLectures
        except IOError:
            continue
    outputPathScore = os.path.join(resultsDir, 'allForumVsFinalScore.csv')
    regressOutputPathScore = os.path.join(
        resultsDir, 'allForumVsFinalScore_regression.csv')
    outputPathLectures = os.path.join(resultsDir, 'allForumVsLectures.csv')
    regressOutputPathLectures = os.path.join(
        resultsDir, 'allForumVsLectures_regression.csv')
    forumActivitiesHistPath = os.path.join(resultsDir, 'allForum_hist.csv')
    finalGradesHistPath = os.path.join(resultsDir, 'finalScore_hist.csv')
    lecturesHistPath = os.path.join(resultsDir, 'lecturesViewed_hist.csv')
    writeResults(forumActivities, finalGrades, outputPathScore)
    writeRegressionResults(forumActivities, finalGrades,
                           regressOutputPathScore)
    # Bug fix: lecturesViewed is collected alongside forumActivities2, so the
    # scatter output must pair with forumActivities2 (as the regression call
    # below already does); forumActivities can have a different length.
    writeResults(forumActivities2, lecturesViewed, outputPathLectures)
    writeRegressionResults(forumActivities2, lecturesViewed,
                           regressOutputPathLectures)
    writeHistogram(forumActivities, forumActivitiesHistPath, limits=(-3.0, 3.0))
    writeHistogram(finalGrades, finalGradesHistPath, limits=(-3.0, 3.0))
    writeHistogram(lecturesViewed, lecturesHistPath, limits=(-3.0, 3.0))
def run(projectName):
    """Count, across all course datasets, how many forum contributors fall
    into each course's top five percent, and print the totals.

    Courses without a <name>_contribution.csv file are skipped.
    """
    courseDatasets = FileSystem.loadCourseDatasetInfo()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    # NOTE(review): outputPath is computed but never used in this function —
    # kept for behavioral parity with the original.
    outputPath = os.path.join(resultsDir, 'topContributorPositions.txt')
    numTopContributors = 0
    numContributors = 0
    for course in courseDatasets:
        print(course.name)
        contributionPath = os.path.join(
            resultsDir, course.name + '_contribution.csv')
        try:
            with open(contributionPath) as fid:
                forumUserIds = [line.strip() for line in fid]
        except IOError:
            continue
        topUserIds = getTopFivePercent(forumUserIds)
        numTopContributors += len(topUserIds)
        numContributors += len(forumUserIds)
    print('Number of Top Contributors: ' + str(numTopContributors))
    print('Number of contributors: ' + str(numContributors))
def run(projectName):
    """Report correlations between forum-response metrics and course-outcome
    metrics from the project's results.csv (outlier rows removed first).
    """
    path = os.path.join(FileSystem.getResultsDir(), projectName, 'results.csv')
    df = pd.read_csv(path, sep=', ')
    df = killOutliers(df)
    # Each (X, Y) pair below replaces an identical copy-pasted
    # getCorr/reportCorrelation stanza from the original.
    pairs = [
        ('MedianFirstResponseTime', 'FracLecsViewed'),
        ('FracOpenThreads', 'FracLecsViewed'),
        ('FracOpenThreads', 'FracQuizSubmissions'),
        ('AvgNumResponses', 'FracLecsViewed'),
        ('MedianNumVotes', 'FracLecsViewed'),
        ('FracQuizSubmissions', 'FracLecsViewed'),
    ]
    for X, Y in pairs:
        corrResult = getCorr(df, X, Y)
        reportCorrelation(corrResult, X, Y)
    # Dead-code fix: the original ended with an unconditional `return`
    # followed by unreachable plotCorr() calls; that unreachable block has
    # been removed.
def initialise(self, pathToLogs):
    """Open a timestamped log file (YYYY_MM_DD_HH_MM.txt) under
    *pathToLogs*, storing the handle in self.file and the name in
    self.fileName.

    Any failure is wrapped and re-raised as XBMConfigMergeError.
    """
    try:
        stamp = datetime.datetime.today().strftime("%Y_%m_%d_%H_%M")
        logFileName = stamp + ".txt"
        logFilePath = FileSystem.joinPath(pathToLogs, logFileName)
        self.file = open(logFilePath, 'w+')
        self.fileName = logFileName
    # BaseException deliberately kept as broad as the original: every
    # failure mode is surfaced to the caller as a config-merge error.
    except BaseException as e:
        raise XBMConfigMergeError(e)
def __init__(self):
    """Configure the TopicModel pipeline: logger, database list, data and
    results directories, training binary path, and parameter sets.
    """
    self.projectName = 'TopicModel'
    # logger is started before any directory creation so later FileSystem
    # calls are logged
    FileSystem.startLogger(self.projectName, 'log')
    self.dbNames = FileSystem.loadForumList()
    self.dataDir = FileSystem.createDataDir(self.projectName)
    self.resultsDir = FileSystem.createResultsDir(self.projectName)
    # path to the LDA training executable shipped in the project's bin dir
    self.trainExecutablePath = os.path.join(FileSystem.getBinDir(), 'lda')
    self.preprocessParams = PREPROCESSPARAMS
    self.topicModelParams = TOPICMODELPARAMS
    # spell-check dictionary for the configured preprocessing language
    # (presumably an enchant language tag like 'en_US' — TODO confirm)
    self.langDict = enchant.Dict(self.preprocessParams['language'])