def remove_transfered_files(self):
    """Delete every transferred file from the remote host, then tear down.

    Each path in ``self.transfered`` is removed through the remote
    client; a failed removal is reported but does not stop the rest.
    ``self.destroy()`` runs once all removals have been attempted.
    """
    for path in self.transfered:
        try:
            self.client.remove(path)
        except Exception:
            # Best-effort cleanup: report and keep going.
            error("Couldn't remove zip file.")
    self.destroy()
def inferior_IQ(data: [float]) -> None:
    """Print the percentage of people whose IQ falls below a threshold.

    data[0] -- mean of the IQ distribution
    data[1] -- standard deviation
    data[2] -- integer IQ threshold; must satisfy 0 < data[2] < 200
    """
    if not 0 < data[2] < 200:
        error('IQ must be integer between 0 and 200.\n')
        return
    mean, sigma, threshold = data[0], data[1], data[2]
    # Left Riemann sum of the normal pdf in 0.01-IQ steps: the 0.01 step
    # width and the x100 percent conversion cancel, so the raw sum is
    # already a percentage.
    result = sum(
        1 / (sigma * sqrt(2 * pi)) *
        exp(-0.5 * pow((step / 100 - mean) / sigma, 2))
        for step in range(threshold * 100)
    )
    print('{:.01f}% of people have an IQ inferior to {:d}'.format(
        result, threshold))
def class_divider(tmpdata):
    """Split labelled rows into two feature lists by their class label.

    Each row's last element is its label: +1 rows go into the first
    list, -1 rows into the second (labels are stripped from the
    returned rows). Any other label is printed, reported via error(),
    and aborts the program.

    Returns:
        (class1, class2) -- feature rows for labels +1 and -1.
    """
    class1 = []
    class2 = []
    for row in tmpdata:
        if row[-1] == 1:
            class1.append(row[:-1])
        elif row[-1] == -1:
            class2.append(row[:-1])
        else:
            print(row)
            error('Bad data')
            # Fix: exit non-zero -- bad input is a failure, and the
            # original exit(0) reported success to the shell.
            exit(1)
    return class1, class2
def between_IQ(data: [float]) -> None:
    """Print the percentage of people whose IQ lies between two bounds.

    data[0] -- mean of the IQ distribution
    data[1] -- standard deviation
    data[2], data[3] -- integer bounds with 0 < IQ1 < IQ2 < 200
    """
    if not (0 < data[2] < data[3] < 200):
        error('IQ must be integers between 0 and 200, and IQ1 < IQ2.\n')
        return
    mean, sigma = data[0], data[1]
    # Left Riemann sum of the normal pdf in 0.01-IQ steps: the step
    # width and the percent conversion cancel, so the raw sum is
    # already a percentage.
    result = 0
    x = data[2]
    while x < data[3]:
        result += 1 / (sigma * sqrt(2 * pi)) * \
            exp(-0.5 * pow((x - mean) / sigma, 2))
        x += 0.01
    print('{:.01f}% of people have an IQ between {:d} and {:d}'.format(
        result, data[2], data[3]))
def connect(self):
    """Open an SSH connection using the stored credentials.

    Uses ``self.url``/``self.username``/``self.password``; unknown
    hosts are auto-added. On any failure the error is reported and the
    object is torn down via ``self.destroy()``.
    """
    try:
        self.client = paramiko.SSHClient()
        self.client.load_system_host_keys()
        self.client.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        self.client.connect(
            self.url,
            username=self.username,
            password=self.password
        )
    except Exception:
        # Fix: was a bare 'except:', which also swallowed
        # KeyboardInterrupt and SystemExit.
        error("SSH client couldn't connect.")
        self.destroy()
def connect(self):
    """Open an SSH session and an SFTP client on top of it.

    ``self.connection`` holds the SSH client, ``self.client`` the SFTP
    channel opened over it. Unknown hosts are auto-added. Failures are
    reported through error().
    """
    try:
        self.connection = paramiko.SSHClient()
        self.connection.load_system_host_keys()
        self.connection.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        self.connection.connect(
            self.url,
            username=self.username,
            password=self.password
        )
        self.client = self.connection.open_sftp()
    except Exception:
        # Fix: narrowed from a bare 'except:' so Ctrl-C and
        # SystemExit still propagate.
        error("SFTP client couldn't connect.")
def _load_keys(self):
    """Load the API keys from api_keys.json into the key queue.

    The file is expected next to this module and must contain a JSON
    object with a "keys" list. A missing or malformed file is reported
    via error() and aborts the process with exit code -1.
    """
    cur_path = os.path.dirname(__file__)
    file_name = os.path.join(cur_path, "api_keys.json")
    try:
        # Fix: the original opened the file without 'with' and never
        # closed it; the context manager guarantees closure.
        with open(file_name, 'r') as f:
            key_json = json.load(f)
    except FileNotFoundError as e:
        error(e)
        error("Must Create a json file with api keys")
        exit(-1)
    except json.JSONDecodeError:
        error("Error: Json file is not properly formatted!")
        exit(-1)
    # Enqueue every key (plain loop instead of list(map(...)), which
    # built a throwaway list purely for its side effects).
    for key in key_json["keys"]:
        self._api_keys.put(key)
# See COPYING and license/LICENSE.steambridge for license information import os import subprocess from utilities import execute, error, Options options = Options() options.load() config = options.configure() build = config['steambridge']['build'] prefix = config['steambridge']['prefix'] if not os.path.isfile(build['bridge_lib']): error("Install", "Winelib bridge library isn't compiled (run make)!") if not os.path.isfile(build['proxy_dll']): error("Install", "Win32 proxy library isn't found") try: # os.makedirs raise an exception(!) if the directory already exists # Thanks Python! execute('mkdir -p {}'.format(prefix['shared'])) execute('mkdir -p {}'.format(prefix['bin'])) execute('mkdir -p {}'.format(prefix['pysteambridge'])) execute('mkdir -p {}'.format(prefix['winedllpath'])) execute('mkdir -p {}'.format(prefix['documentation'])) execute('mkdir -p {}'.format(prefix['licenses'])) # shutils.copy and friends seem to work kinda weird and not Unixy, so just use the system cp
def __download_file(self, url, save_as):
    """Retrieve *url* into the local file *save_as*.

    Returns:
        The local path *save_as* -- returned even when the download
        fails, so callers always get the intended destination.
    """
    try:
        urllib.request.urlretrieve(url, save_as)
    except Exception:
        # Best effort: report the failure but still hand back the path.
        error("Couldn't download file.")
    return save_as
def __extract_archive(zipfile, output_dir):
    """Unpack the archive at *zipfile* into *output_dir*.

    Format is inferred by shutil from the file extension; any failure
    is reported through error() rather than raised.
    """
    try:
        shutil.unpack_archive(zipfile, output_dir)
    except Exception:
        error("Couldn't extract archive.")
# Evaluate the trained k-NN predictors against the first hold-out file.
# NOTE(review): this is Python 2 (print statements below); 'offset',
# 'neigh', 'neighY', 'actuals', 'predictions', 'onlyTrainingAndCV',
# 'cvScoresX' and 'cvScoresY' are defined earlier in the file.
# NOTE(review): 'test1' is opened but never closed here.
test1 = open('inputs/test01.txt')
test1Data = np.loadtxt(test1, delimiter=',')
test1Data = util.normalizeData(test1Data)

errors = []
NUM_INPUTS = len(test1Data) - offset

# For each sample from row 1750 on, predict the position 'offset' rows
# ahead and compare against the actual row at i + offset.
for i in range(1750, NUM_INPUTS):
    current = test1Data[i]
    features = util.createFeatureRow(test1Data, i, offset, current)
    td = np.array(features)
    predX = neigh.predict(td)
    predY = neighY.predict(td)
    actual = test1Data[i + offset]
    prediction = [predX[0], predY[0]]
    actuals.append(actual)
    predictions.append(prediction)
    errors.append(util.error([actual], [prediction]))

if onlyTrainingAndCV == False:
    # util.plotLines(actuals, predictions, 'Actual position', 'Predicted position')
    # util.plotGraph(actuals, predictions, 'Actual position', 'Predicted position')
    util.plotData(actuals, 'Actual position')
    util.plotData(predictions, 'Predicted position')
    util.plotLine(errors, 'Error graph')
    print len(actuals), len(predictions)
    print np.sum(errors)
else:
    # Only cross-validation scores were produced; plot those instead.
    util.plotLine(cvScoresX, 'CV label x')
    util.plotLine(cvScoresY, 'CV label y')
    # util.plotLines(cvScoresX, cvScoresY, 'CV label X', 'CV label y')
def __init__(self, github_token):
    """Authenticate a GitHub API client with *github_token*.

    The client is stored on ``self.github``; an authentication failure
    is reported through error() (leaving ``self.github`` unset).
    """
    try:
        self.github = Github(github_token)
    except Exception:
        # Fix: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are not swallowed during construction.
        error("Couldn't authenticate with GitHub.")
#starting the simulation for _ in range(25): #generate the pointset P = generate((2, 14), 10.) #store the initial guess PI = P #update the pointset P = update(P, 1.) #compute the EDM D = EDM(P) #add the different errors D0 = add_error(D, error(0., shape)) D1 = add_error(D, error(.5, shape)) D2 = add_error(D, error(1.5, shape)) D3 = add_error(D, error(2., shape)) D4 = add_error(D, error(2.5, shape)) sys.stdout.write("-") sys.stdout.flush() #reconstruct the pointsets W0P0 = rec(PI, D0, W0, 0.0001) W0P1 = rec(PI, D1, W0, 0.0001) W0P2 = rec(PI, D2, W0, 0.0001) W0P3 = rec(PI, D3, W0, 0.0001) W0P4 = rec(PI, D4, W0, 0.0001)
#define the anchors position, which is fixed and known A = np.array([[0., 0., 10., 10.], [0., 10., 0., 10.]], dtype=np.float64) #generate the initial point configuration TAG = generate(shape, side) #Initzialize original and reconstructed pointset OR[:, 0:4] = A OR[:, 4:] = TAG PR[:, 0:4] = A PR[:, 4:] = TAG #compute the EDM and add error D = EDM(OR) # E = error(2., (14, 14)) D = add_error(D, E) #define the mask. Only distances between sensors are missing W = np.ones((14, 14), dtype=np.float64) W[4:, 4:] = np.zeros((10, 10), dtype=np.float64) my_dpi = 96 #constant for figure size #init the progress bar toolbar_width = n_step sys.stdout.write("[%s]" % (" " * toolbar_width)) sys.stdout.flush() sys.stdout.write("\b" * (toolbar_width + 1)) # return to start of line, after #start the simulatin for i in range(n_step):
# NOTE(review): 'i' is reused both as the test-file index in the filename
# here and as the row index in the loop below -- confirm that is intended.
# NOTE(review): 'f' is opened but never closed.
f = open('inputs/test0' + str(i) + '.txt');
testdata =np.loadtxt(f, delimiter = ',');
errors=[]
tempdiff=[]

# Run a Kalman filter over the last 'numberOfPredictions' rows; F, u, H,
# R, I, x, P and the helpers come from earlier in the file.
for i in range(len(testdata)-numberOfPredictions,len(testdata)):
    # closest training row serves as the measurement for this step
    temp = util.getClosestRowFromTraining(testdata[i], trainingData)

    # prediction
    x = (F * x) + u
    P = F * P * F.transpose()

    # measurement update
    Z = matrix([temp])
    y = Z.transpose() - (H * x)
    S = H * P * H.transpose() + R
    K = P * H.transpose() * S.inverse()
    x = x + (K * y)
    P = (I - (K * H)) * P

    actuals.append(testdata[i])
    predictions.append([x.value[0],x.value[1]])
    # NOTE(review): 'prediction' (singular) is never assigned in this
    # block -- it probably should be [x.value[0], x.value[1]] (the value
    # just appended); confirm where 'prediction' is set, else this raises
    # a NameError or reuses a stale value.
    errors.append(util.error([testdata[i]], [prediction]))
    tempdiff.append(util.error([temp], [prediction]))

#util.plotData(actuals, 'Actual position')
#util.plotData(predictions, 'Predicted position')
util.plotLine(errors, 'Error graph')
#util.plotLine(tempdiff, 'tempdiff graph')
util.plotGraph(actuals, predictions, 'Actual position', 'Predicted position')