def frontend(request):
    """Serve the app page to authenticated clients, the front page otherwise.

    Args:
        request: the incoming Django HTTP request.

    Returns:
        The preloaded 'app' page when a client is authenticated, else the
        public 'frontpage'.
    """
    client = Client.getAuthenticatedUser(request=request)
    if client is None:
        # No authenticated client: show the public landing page.
        return FileLoader.get(name='frontpage')
    return FileLoader.get(name='app')
def test_file_loader_can_load_list_of_integers(self):
    """The sample Karger file loads into a non-empty adjacency map."""
    # arrange
    sample_path = os.path.join(
        os.path.dirname(__file__), "sample_files", "kargerMinCut.txt")
    # act
    adjacency_map = FileLoader.load_adjacency_map(sample_path)
    # assert
    self.assertTrue(len(adjacency_map) > 0)
def __init__(self, layer_sizes=None, learning_rate=1e-4, from_file=None,
             retain_mask=True, epsilons=None):
    """Build the network, either from scratch or restored from a file.

    Args:
        layer_sizes: Unit counts per layer; defaults to [784, 300, 100, 10].
            Ignored when ``from_file`` is given.
        learning_rate: Optimizer step size, stored on the instance.
        from_file: Optional path of a previously saved network to restore.
        retain_mask: Forwarded to restored layers.
        epsilons: Per-layer L-OBS thresholds; defaults to [1, 1, 1].
    """
    # BUG FIX: the defaults were mutable list literals shared across calls;
    # resolve them per call instead.
    if layer_sizes is None:
        layer_sizes = [784, 300, 100, 10]
    if epsilons is None:
        epsilons = [1, 1, 1]
    self.learning_rate = learning_rate
    self.epsilons = epsilons
    self.layers = []
    self.dl = dl.DataLoader()
    if from_file is not None:  # idiomatic identity test (was `!= None`)
        # Restore layers one by one until the file yields no more weights.
        counter = 0
        while True:
            W, b = fl.retrieve_layer(from_file, counter)
            if W is not None:
                # NOTE(review): epsilons[counter] raises IndexError if the
                # file holds more layers than thresholds — confirm intent.
                self.layers.append(
                    l.Layer(W=W, b=b, l_obs_threshold=epsilons[counter],
                            retain_mask=retain_mask))
            else:
                break
            counter += 1
    else:
        # Fresh network: one Layer per consecutive size pair.
        for i in range(len(layer_sizes) - 1):
            self.layers.append(
                l.Layer(num_inputs=layer_sizes[i],
                        num_outputs=layer_sizes[i + 1],
                        l_obs_threshold=epsilons[i]))
    self.train_images, self.train_labels = self.dl.get_training()
    self.test_images, self.test_labels = self.dl.get_testing()
def test_file_loader_can_load_list_of_integers(self):
    """The sample integer file loads into a non-empty list of longs."""
    # arrange
    sample_path = os.path.join(
        os.path.dirname(__file__), "sample_files", "IntegerArray.txt")
    # act
    integers = FileLoader.load_list_of_long_integers(sample_path)
    # assert
    self.assertTrue(len(integers) > 0)
def test_compute_djikstra_shortest_path(self):
    """Dijkstra over the sample graph yields a non-empty distance map."""
    graph_file = os.path.join(
        os.path.dirname(__file__), "sample_files", "dijkstraData.txt")
    graph = FileLoader.load_adjacency_list(graph_file)
    distances = Djikstra.compute_shortest_path(graph)
    # Format distances of the vertices of interest (kept for inspection;
    # not asserted on).
    vertices_of_interest = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
    result_str = "".join(
        "{0}:{1}\n".format(v, distances[v]) for v in vertices_of_interest)
    self.assertTrue(len(distances) > 0)
def test_quick_sort_with_median_of_three_as_pivot(self):
    """Median-of-three quicksort reports a positive comparison count."""
    # arrange
    sample_path = os.path.join(
        os.path.dirname(__file__), "sample_files", "QuickSort.txt")
    numbers = FileLoader.load_list_of_long_integers(sample_path)
    # act
    comparisons = QuickListSorter.quicksort_with_median_of_three_as_pivot(
        numbers, 0, len(numbers))
    # assert
    self.assertTrue(comparisons > 0)
def downloadHistory(dump):
    """Download the full price history of every NSE stock into *dump*.

    Returns the list of destination URIs, one per stock.
    """
    uris = []
    # History runs up to (and including) yesterday.
    endDate = date.today() - timedelta(days=1)
    for stock in FileLoader.loadNSEData():
        uri = "/".join([dump, getFileName(stock)])
        uris.append(uri)
        stockName = stock[1]
        # stock[3] holds how many days of history to fetch for this stock.
        startDate = endDate - timedelta(days=int(stock[3]))
        download(stockName, uri, startDate, endDate, 'w')
    return uris
def downloadWindow(dump, window):
    """Download prices for every NSE stock over *window*, appending to files
    under *dump*.

    Returns the list of destination URIs, one per stock (mirrors
    downloadHistory).
    """
    # BUG FIX: was initialised as a dict ({}), which has no append();
    # use a list like downloadHistory does.
    listUri = []
    endDate = window.end
    for stock in FileLoader.loadNSEData():
        # BUG FIX: str.join takes a single iterable, not two positional
        # arguments — the original raised TypeError on the first stock.
        uri = "/".join([dump, getFileName(stock)])
        listUri.append(uri)
        stockName = stock[1]
        startDate = window.start
        # 'a' appends this window to any existing per-stock file.
        download(stockName, uri, startDate, endDate, 'a')
    return listUri
def test_merge_count_inversions(self):
    """Merging two sample slices yields a merged list and some inversions."""
    # arrange
    sample_path = os.path.join(
        os.path.dirname(__file__), "sample_files", "IntegerArray.txt")
    numbers = FileLoader.load_list_of_long_integers(sample_path)
    left = numbers[0:3]
    right = numbers[4:7]
    # act
    merged, inversions = ListSorter.MergeAndCountInversions(left, right)
    # assert
    self.assertTrue(len(merged) > 0 and inversions > 0)
def test_find_minimum_cut(self):
    """Repeated randomised contraction finds a positive global minimum cut."""
    # arrange
    adjacency_list = os.path.join(
        os.path.dirname(__file__), "sample_files", "kargerMinCut.txt")
    adjacency_map = FileLoader.load_adjacency_map(adjacency_list)
    # act: Karger's algorithm is randomised, so sample it repeatedly and
    # keep the best (smallest) cut seen. deepcopy protects the input map
    # from the in-place contraction.
    global_minimum_cut = kargerMinCut.edge_contraction(
        copy.deepcopy(adjacency_map))
    for i in range(1, 200):
        this_min_cut = kargerMinCut.edge_contraction(
            copy.deepcopy(adjacency_map))
        if this_min_cut < global_minimum_cut:
            global_minimum_cut = this_min_cut
        # BUG FIX: the bare Python 2 `print` statement is a syntax error on
        # Python 3; the parenthesised form behaves identically on both.
        print("iteration {}: minimum cut is {}".format(i, global_minimum_cut))
    # assert
    self.assertTrue(global_minimum_cut > 0)
from FileLoader import *
import sys
import pdb

# List the files under the directory given as argv[1], keeping only those
# whose extension matches argv[2], and print one path per line.
# (Commented-out labelsDir experiment removed as dead code.)
imgsDir = FileLoader(sys.argv[1])
imgsDir.filterByExt(sys.argv[2])
for path in imgsDir.getFilePaths():
    # BUG FIX: bare Python 2 `print` statement is invalid on Python 3;
    # the parenthesised form prints identically on both.
    print(path)
def save_network(self, save_filename):
    """Persist every layer of this network to *save_filename* via fl.store_layers."""
    fl.store_layers(save_filename, self.layers)
def test_file_loader(self):
    """The sample Dijkstra data file loads into a non-empty adjacency list."""
    sample_path = os.path.join(
        os.path.dirname(__file__), "sample_files", "dijkstraData.txt")
    adjacency = FileLoader.load_adjacency_list(sample_path)
    self.assertTrue(len(adjacency) > 0)
import random
import FileLoader as fl
from GenAlgorithm import GenAlgorithm
from GreedyAlgorithm import GreedyAlgorithm
import time
import numpy as np
from RandAlgorithm import RandAlgorithm

if __name__ == "__main__":
    # Load the TSP instance and set up the baseline solvers.
    cities, coord_type = fl.load_cities("Data/gr666.tsp")
    greedy_alg = GreedyAlgorithm(cities, coord_type)
    rand_alg = RandAlgorithm(cities, coord_type, 1000)

    # init best: pop_size 50, mut_prob 0.08, cross_prob: 0.7
    # Hyper-parameter grid (currently a single point).
    pop_sizes_factors = [1]
    mut_probs_factors = [0.08]
    cross_probs = [0.7]
    fitnesses = 0.
    for pop_size in pop_sizes_factors:
        for mut_prob in mut_probs_factors:
            for cross_prob in cross_probs:
                #for i in range(5):
                # Run the GA for 60 time units on this parameter combination.
                genetic_alg = GenAlgorithm(cities, coord_type, pop_size,
                                           mut_prob, cross_prob, 5, 1)
                genetic_alg.run(60)
                print("pop_size:", pop_size, "mut prob:", mut_prob,
                      "cross_prob:", cross_prob)
                print("Fitness:", genetic_alg.Best_solution.Fitness)
                #fitnesses+=genetic_alg.Best_solution.Fitness
#!/usr/bin/env python
from FileLoader import *
import sys
import pdb
import numpy as np

# Collect every .jpg under the directory given as argv[1] and record the
# file paths, one per line.
imgsDir = FileLoader(sys.argv[1])
imgsDir.filterByExt('.jpg')
with open('imageFiles.txt', 'w') as f:
    f.write("\n".join(imgsDir.getFilePaths()))

# A label is the first two '_'-separated tokens of each file's basename.
label = []
for path in imgsDir.getFilePaths():
    basename = path.split('/')[-1]
    parts = basename.split('_')
    label.append(parts[0] + ' ' + parts[1])
with open('imageLabels.txt', 'w') as g:
    g.write("\n".join(label))
import glob
import FileLoader
import OrderParameter
#import CorrelationPlots
#import ContourPlots
#import OrderVsDisorder

# Batch analysis: run the order-parameter computation over every CSV in the
# project data directory, then read back the aggregated results.
print("Starting Analysis")
FileLoader.OPinit()
progress_counter = 0
for i in sorted(
        glob.glob(
            "/media/jay/0FD90FF80FD90FF83/PROJECTDATA/2DPERIODIC_2/Data/*.csv")):
    #Track progress of analysis
    print(progress_counter)
    # Select files by the character at a fixed position in the absolute path.
    # NOTE(review): the code tests i[59] == "v" although the original comment
    # spoke of 'F for FHN' at index 38 — the magic index is tied to the exact
    # path prefix above; confirm before reusing on another machine.
    if i[59] == "v":
        df, params = FileLoader.FileLoader(i)
        print(params)
        OrderParameter.OP(i, df, params[0], params[1], params[2], params[3],
                          params[4])
    progress_counter += 1
op_csv = FileLoader.OPread()
#CorrelationPlots.Correlation(op_csv)
#ContourPlots.ContourPlots(op_csv)
#OrderVsDisorder.OrderVsDisorder(op_csv)
def errorPage(request):
    """Return the preloaded error page for any incoming request."""
    page = FileLoader.get(name='errorPage')
    return HttpResponse(page)
from FileLoader import *
import nltk
from nltk.corpus import stopwords
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import random
from textblob import TextBlob
import warnings
import tkinter
from tkinter import *

warnings.filterwarnings("ignore")

# Load the headlines dataset and tag each row with a coarse sentiment label
# derived from TextBlob's polarity over headline + description.
file = FileLoader("reuters_headlines.csv")
data = file.read_file()
data.insert(data.shape[1], 'Sentiment', 0)
for i in range(len(data)):
    corpus = TextBlob(data['Headlines'][i] + ' ' + data['Description'][i])
    # BUG FIX: assign via .loc rather than chained indexing
    # (data['Sentiment'][i] = ...), which pandas may apply to a temporary
    # copy (SettingWithCopyWarning) and silently drop.
    if corpus.sentiment.polarity > 0:
        data.loc[i, 'Sentiment'] = "Positive"
    elif corpus.sentiment.polarity < 0:
        data.loc[i, 'Sentiment'] = "Negative"
    else:
        data.loc[i, 'Sentiment'] = "Neutral"
# print(data.info())
# print(data.Sentiment.value_counts())
data_copy = data.copy()
def updateScreen(self, filePath):
    """Load the preset file at *filePath* and refresh the whole screen.

    Updates the preset name/details text, then rebuilds a pyqtgraph layout
    with three y-axes sharing one x (time) range: force (white, main plot),
    pressure (cyan) and temperature (red) on two extra left-hand axes.
    """
    _translate = QtCore.QCoreApplication.translate
    self.file = FileLoader.PresetFile(filePath)
    self.presetName.setText(
        _translate("FileStartWidget", "\t" + self.file.getName()))
    # Compose the multi-line details string shown beside the plot.
    setStr = (
        "\tTest Details:\n\t " + str(self.file.getDetails()) +
        "\t\n\tRun Time (hr:min:sec):\t\n\t " + str(self.file.getTimeRead()) +
        "\n\tMax Temp:\n\t " + str(self.file.getMaxTemp()) +
        " °C\n\tMax Force:\n\t {:,.0f}".format(self.file.getMaxForce()) +
        " N\n\tMin Pressure:\n\t " + str(self.file.getMinPressure()) +
        " Pa\n\tLast Time Accessed:\n\t " + str(self.file.getLastAccess()))
    self.presetDetails.setText(_translate("FileStartWidget", setStr))
    # Extra left axes and view boxes for the pressure/temperature curves.
    self.a2 = pg.AxisItem("left")
    self.a3 = pg.AxisItem("left")
    self.v2 = pg.ViewBox()
    self.v3 = pg.ViewBox()
    #l = pg.GraphicsLayout()
    self.graphWidget.addItem(self.a2, row=2, col=2, rowspan=1, colspan=1)
    self.graphWidget.addItem(self.a3, row=2, col=1, rowspan=1, colspan=1)
    self.pI = pg.PlotItem()
    self.v1 = self.pI.vb
    self.graphWidget.addItem(self.pI, row=2, col=3, rowspan=1, colspan=1)
    # The extra view boxes live directly in the scene, tied to their axes.
    self.graphWidget.scene().addItem(self.v2)
    self.graphWidget.scene().addItem(self.v3)
    self.a2.linkToView(self.v2)
    self.a3.linkToView(self.v3)
    # Chain the x-axes so all three curves pan/zoom together.
    self.v2.setXLink(self.v1)
    self.v3.setXLink(self.v2)
    self.pI.getAxis("left").setLabel('Force (N)', color='#FFFFFF')
    self.a2.setLabel('Pressure (pa)', color='#2EFEF7')
    self.a3.setLabel('Temperature (°C)', color='#FF0000')
    self.v1.addItem(
        pg.PlotCurveItem(self.file.getTimeLst(),
                         self.file.getForceLst(),
                         pen='#FFFFFF'))
    # NOTE(review): 101000 and 20 look like ambient-pressure (Pa) and
    # ambient-temperature (°C) baselines passed to the getters — confirm
    # against PresetFile.
    self.v2.addItem(
        pg.PlotCurveItem(self.file.getTimeLst(),
                         self.file.getPressLst(101000),
                         pen='#2EFEF7'))
    self.v3.addItem(
        pg.PlotCurveItem(self.file.getTimeLst(),
                         self.file.getTempLst(20),
                         pen='#FF0000'))
    # Keep the secondary view boxes in sync with the main plot's geometry.
    self.v1.sigResized.connect(self.updateViews)
    self.v2.enableAutoRange(axis=pg.ViewBox.XYAxes, enable=True)
    self.v3.enableAutoRange(axis=pg.ViewBox.XYAxes, enable=True)
    self.updateViews()
ndarray = np.pad(ndarray, ((int((max_corpus_length - len(list)) / 2), int((max_corpus_length - len(list)) / 2)), (0, 0)), 'constant', constant_values=(0, 0)) else: ndarray = np.pad(ndarray, ((int((max_corpus_length - len(list)) / 2), int((max_corpus_length - len(list)) / 2) + 1), (0, 0)), 'constant', constant_values=(0, 0)) return ndarray file = FileLoader("Womens Clothing E-Commerce Reviews.csv") data = file.read_file() print(data.info()) data.drop(labels=['Clothing ID', 'Title'], axis=1, inplace=True) data = data[~data['Review Text'].isnull()] # ros = RandomOverSampler(random_state=0) # data_resampled, label_resampled = ros.fit_resample(pd.DataFrame(data['Review Text']), data["Recommended IND"]) # duplicate = data[data["Recommended IND"].isin([0])] # print(duplicate) # data = pd.concat([data,duplicate,duplicate]) print(data) preprocessed_data = Preprocessing(data) preprocessed_data.error_cleaning("Review Text")
from wordcloud import WordCloud from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.svm import LinearSVC from sklearn.model_selection import cross_val_score from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import CountVectorizer import xgboost as xgb from sklearn.metrics import accuracy_score pd.set_option('display.width', 150) pd.set_option('display.max_colwidth', 150) training_data = FileLoader("Covid_train_data.csv") df_training = training_data.read_file() testing_data = FileLoader("Covid_test_data.csv") df_testing = testing_data.read_file() print(df_training.info()) print(df_testing.info()) df_training["Sentiment"].value_counts().plot(kind='bar') plt.xlabel("Sentiment") plt.ylabel("Counts") plt.title("Proportion of sentiments") plt.show() def sentiment_extraction(df, column, label):
def chooseLoaderFromExtension(filename):
    """Return a loader callable appropriate for *filename*'s extension.

    Args:
        filename: file name or path whose extension selects the loader.

    Returns:
        A one-argument callable mapping a path to its parsed content
        (FileLoader.jsonLoader for '.json', FileLoader.yamlLoader for
        '.yaml'), or None for unrecognised extensions — callers must
        handle the None case.
    """
    # str.endswith is clearer and safer than slicing a fixed-width suffix;
    # the leftover debug print of filename[-5:] was removed.
    if filename.endswith('.json'):
        return lambda path: FileLoader.jsonLoader(path)
    if filename.endswith('.yaml'):
        return lambda path: FileLoader.yamlLoader(path)
    # Explicit (previously implicit) fallthrough for unknown extensions.
    return None
#!/usr/bin/env python from FileLoader import * import sys import pdb import numpy as np emotions=dict({0:'neutral', 1:'anger', 2:'contempt', 3:'disgust', 4:'fear', 5:'happy', 6:'sadness', 7:'surprise'}) labelsDir=FileLoader('/media/hwlee/DATA/dataset/CohnKanade+/Emotion') imgsDir=FileLoader('/media/hwlee/DATA/dataset/CohnKanade+/cohn-kanade-images') content = labelsDir.loadAllFiles() Emotionlabels=[] for num in content: Emotionlabels.append(int(float(num[0]))) Emotions4imgsTable=dict(zip(labelsDir.getRelativeParentFolderPaths(),Emotionlabels)) personLabel = [lab.split('/')[0] for lab in labelsDir.getRelativeParentFolderPaths()] labels = [] count=0 for path in imgsDir.getRelativeParentFolderPaths(): try: print path labels.append(emotions[Emotions4imgsTable[path]]+' '+path.split('/')[0]) count=count+1 except: imgsDir.removeFile(count) imgsDir.setLabel(labels) #pdb.set_trace() with open ('imageFiles.txt','w') as f: f.write("\n".join(imgsDir.getFilePaths())) with open ('imageLabels.txt','w') as g:
def test_file_loader_can_load_list_of_x_y_edges_as_2tuples(self):
    """Computing SCCs from the sample edge list returns a non-empty result."""
    sample_path = os.path.join(
        os.path.dirname(__file__), "sample_files", "SCC.txt")
    scc_result = FileLoader.compute_scc_from_edge_list(sample_path)
    self.assertTrue(len(scc_result) > 0)
def __init__(self):
    """Build the CAN sniffer main window: wire up every widget signal,
    start the background threads, and initialise runtime state."""
    super(canSnifferGUI, self).__init__()
    self.setupUi(self)
    # --- Button / widget signal wiring ---
    self.portScanButton.clicked.connect(self.scanPorts)
    self.portConnectButton.clicked.connect(self.serialPortConnect)
    self.portDisconnectButton.clicked.connect(self.serialPortDisconnect)
    self.startSniffingButton.clicked.connect(self.startSniffing)
    self.stopSniffingButton.clicked.connect(self.stopSniffing)
    self.saveSelectedIdInDictButton.clicked.connect(
        self.saveIdLabelToDictCallback)
    self.saveSessionToFileButton.clicked.connect(self.saveSessionToFile)
    self.loadSessionFromFileButton.clicked.connect(
        self.loadSessionFromFile)
    self.showOnlyIdsLineEdit.textChanged.connect(
        self.showOnlyIdsTextChanged)
    self.hideIdsLineEdit.textChanged.connect(self.hideIdsTextChanged)
    self.clearLabelDictButton.clicked.connect(self.clearLabelDict)
    # Shared serial port handle used by both reader and writer threads.
    self.serialController = serial.Serial()
    self.mainMessageTableWidget.cellClicked.connect(self.cellWasClicked)
    self.newTxTableRow.clicked.connect(self.newTxTableRowCallback)
    self.removeTxTableRow.clicked.connect(self.removeTxTableRowCallback)
    self.sendTxTableButton.clicked.connect(self.sendTxTableCallback)
    self.abortSessionLoadingButton.clicked.connect(
        self.abortSessionLoadingCallback)
    self.showSendingTableCheckBox.clicked.connect(
        self.showSendingTableButtonCallback)
    self.addToDecodedPushButton.clicked.connect(self.addToDecodedCallback)
    self.deleteDecodedPacketLinePushButton.clicked.connect(
        self.deleteDecodedLineCallback)
    self.decodedMessagesTableWidget.itemChanged.connect(
        self.decodedTableItemChangedCallback)
    self.clearTableButton.clicked.connect(self.clearTableCallback)
    self.sendSelectedDecodedPacketButton.clicked.connect(
        self.sendDecodedPacketCallback)
    self.playbackMainTableButton.clicked.connect(
        self.playbackMainTableCallback)
    self.stopPlayBackButton.clicked.connect(self.stopPlayBackCallback)
    self.hideAllPacketsButton.clicked.connect(self.hideAllPackets)
    self.showControlsButton.hide()
    # --- Background threads: serial I/O, file loading, packet aging ---
    self.serialWriterThread = SerialWriter.SerialWriterThread(
        self.serialController)
    self.serialReaderThread = SerialReader.SerialReaderThread(
        self.serialController)
    self.serialReaderThread.receivedPacketSignal.connect(
        self.serialPacketReceiverCallback)
    self.fileLoaderThread = FileLoader.FileLoaderThread()
    self.fileLoaderThread.newRowSignal.connect(
        self.mainTablePopulatorCallback)
    self.fileLoaderThread.loadingFinishedSignal.connect(
        self.fileLoadingFinishedCallback)
    self.hideOldPacketsThread = HideOldPackets.HideOldPacketsThread()
    self.hideOldPacketsThread.hideOldPacketsSignal.connect(
        self.hideOldPacketsCallback)
    # --- Initial widget visibility ---
    self.stopPlayBackButton.setVisible(False)
    self.playBackProgressBar.setVisible(False)
    self.sendingGroupBox.hide()
    # Hide packets older than 5 time units; thread runs for the app lifetime.
    self.hideOldPacketsThread.enable(5)
    self.hideOldPacketsThread.start()
    self.scanPorts()
    # --- Runtime state ---
    self.startTime = 0
    self.receivedPackets = 0
    self.playbackMainTableIndex = 0
    self.labelDictFile = None
    self.idDict = dict([])
    self.showOnlyIdsSet = set([])
    self.hideIdsSet = set([])
    self.idLabelDict = dict()
    self.isInited = False
    self.init()
    # Session files are written under ./save — create it on first run.
    if not os.path.exists("save"):
        os.makedirs("save")
    # Narrow the per-byte columns of both tables.
    # NOTE(review): the second loop iterates over the MAIN table's column
    # count while sizing the DECODED table — possibly a copy/paste slip;
    # confirm the intended bound.
    for i in range(5, self.mainMessageTableWidget.columnCount()):
        self.mainMessageTableWidget.setColumnWidth(i, 32)
    for i in range(5, self.mainMessageTableWidget.columnCount()):
        self.decodedMessagesTableWidget.setColumnWidth(i, 32)
    self.decodedMessagesTableWidget.setColumnWidth(1, 150)
    self.decodedMessagesTableWidget.horizontalHeader(
    ).setSectionResizeMode(0, QHeaderView.Stretch)
    self.txTable.horizontalHeader().setSectionResizeMode(
        3, QHeaderView.Stretch)
    self.showFullScreen()
# tetibop — a Text Editor Based On Python.
# Special edition for any terminal that supports curses.
# Main file: the program's entry point.
import MainBuffer
import FileLoader

# Pull the optional file name and its contents from the command line.
data = FileLoader.load_file_from_cl()
file_name = ''
lines = []
if len(data) > 1:
    file_name = data[1]
    lines = data[0]

# Hand everything to the main editor buffer and start its loop.
app = MainBuffer.MainBuffer(lines, file_name)
app.run()
# Internationalisation / localisation / timezone switches.
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Keep the CSRF token in the session rather than a cookie.
CSRF_USE_SESSIONS = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# WSGI_APPLICATION = 'Server.wsgi.application'

# Preload the HTML entry points into the FileLoader cache at startup so the
# views can serve them by name ('errorPage', 'frontpage', 'app').
ErrorPage_Path = 'frontend/errorPage/'
FileLoader.load(name='errorPage', path=ErrorPage_Path + 'errorPage.html')
FRONTEND_PATH = 'frontend/frontpage/build/'
FileLoader.load(name='frontpage', path=FRONTEND_PATH + 'index.html')
APP_PATH = 'frontend/app/build/'
FileLoader.load(name='app', path=APP_PATH + 'index.html')

REPOSITORY_ROOT = os.path.dirname(BASE_DIR)
# Serve the built frontends' static assets alongside Django's own.
STATICFILES_DIRS = [
    FRONTEND_PATH + 'static/',
    APP_PATH + 'static/',
]
# NOTE(review): allowing every CORS origin is permissive — confirm this is
# intended outside development.
CORS_ORIGIN_ALLOW_ALL = True
__author__ = 'kingrichard2005'
import os
import FileLoader
import copy
import sys
import argparse
from FileLoader import FileLoader

if __name__ == "__main__":
    # Command-line driver: compute strongly connected components for the
    # edge-list file passed with -f/--file and print one component per line.
    parser = argparse.ArgumentParser(
        prog="SCC",
        description='Computes strongly connected components from edge list')
    parser.add_argument('-f', '--file', help='Edge list', dest="my_edge_list")
    args = parser.parse_args()
    scc_list = FileLoader.compute_scc_from_edge_list(args.my_edge_list)
    for component in scc_list:
        # BUG FIX: the bare Python 2 `print` statement is a syntax error on
        # Python 3; the parenthesised single-argument form behaves the same
        # on both. Stray trailing semicolons were also dropped.
        print("{0}".format(component))