from numpy import log10, logspace, zeros  # array helpers used below

def compareGrid(specFlag, globals):
    Te_MIN = globals.TE_MIN
    Te_MAX = globals.TE_MAX
    ne_MAX = globals.NE_MAX
    ne_MIN = globals.NE_MIN

    data = Data()
    data.loadData(1110328120)
    ps = data.getPolySegData(3, 0)
    fitPolySeg(ps)
    print "Init density: ", ps.ne0, " Init temp: ", ps.Te0

    maxProb = 0
    ps.ne0 = -1
    ps.Te0 = -1
    gridPts = 10  # integer, so it can be used directly as an array shape
    gridsearch = zeros((gridPts, gridPts))
    i = 0
    for Te in logspace(log10(Te_MIN), log10(Te_MAX), gridPts):
        j = 0
        for ne in logspace(log10(ne_MIN), log10(ne_MAX), gridPts):
            Nphotons = specPhotons(ne, Te, ps, specFlag)
            prob = -calcModelProbability(ps, globals, Nphotons)
            gridsearch[i, j] = prob
            if prob > maxProb:
                maxProb = prob
                ps.ne0 = ne
                ps.Te0 = Te
            j += 1
        i += 1
    return gridsearch, maxProb, ps
def StoreJSONFile(self, fileId: int, jsonFile) -> None:
    data = Data()
    data.InitFromBuffer(fileId, jsonFile.decode(), False, True)
    dataBlock = self.buffer.GetDataBlock(self.currentBlockIndex)
    while True:
        if dataBlock.GetFreeSpaceSize() < data.GetLenght():
            # Data does not fit: split it, write the first part into the
            # current block, then continue with the remainder in the next
            # free block.
            dividedData = data.Split(dataBlock.GetFreeSpaceSize(), self.GetNextFreeDataBlock())
            self.buffer.WriteOnDataBlock(self.currentBlockIndex, dividedData[0])
            self.currentBlockIndex = self.GetNextFreeDataBlock()
            dataBlock = self.buffer.GetDataBlock(self.currentBlockIndex)
            data = dividedData[1]
        else:
            if data.isFragmented:
                data.nextDatablock = None
            self.buffer.WriteOnDataBlock(self.currentBlockIndex, data)
            break
from numpy import log10, logspace, zeros  # array helpers used below

def compareGrid(specFlag):
    from ..globals import global_settings
    # Global variables: Te_MIN; Te_MAX; NE_STEPS; TE_STEPS;
    # Saturation levels: ACQIRIS_MIN; ACQIRIS_MAX; STRUCK_MIN; STRUCK_MAX;
    # Laser wavelength: LASER_LAM;
    # Logbook quality: LOGBOOK_TRUE; LOGBOOK_UNSURE;
    globals = init_globals()
    Te_MIN = globals[0]
    Te_MAX = globals[1]
    NE_STEPS = globals[2]
    TE_STEPS = globals[3]
    ACQIRIS_MIN = globals[4]
    ACQIRIS_MAX = globals[5]
    STRUCK_MIN = globals[6]
    STRUCK_MAX = globals[7]
    LASER_LAM = globals[8]
    LOGBOOK_TRUE = globals[9]
    LOGBOOK_UNSURE = globals[10]
    # global_settings.init_globals()

    data = Data()
    data.loadData(1110328120)
    ps = data.getPolySegData(3, 0)
    fitPolySeg(ps)
    print "Init density: ", ps.ne0, " Init temp: ", ps.Te0

    maxProb = 0
    ps.ne0 = -1
    ps.Te0 = -1
    gridPts = 10
    gridsearch = zeros((gridPts, gridPts))
    i = 0
    for Te in logspace(log10(Te_MIN), log10(Te_MAX), gridPts):
        j = 0
        # Note: ne_MIN and ne_MAX are never assigned above; they must also
        # be provided by the globals for this loop to run.
        for ne in logspace(log10(ne_MIN), log10(ne_MAX), gridPts):
            Nphotons = specPhotons(ne, Te, ps, specFlag)
            prob = -calcModelProbability(ps, Nphotons)
            gridsearch[i, j] = prob
            if prob > maxProb:
                maxProb = prob
                ps.ne0 = ne
                ps.Te0 = Te
            j += 1
        i += 1
    return gridsearch, maxProb, ps
def move(self, me, world, game, move):
    if DebuggerWindow is not None:
        newData = Data()
        newData.clone(self.data)
        self.data = newData
        self.debug.log(me, world, game, move, newData)
    self.data.clearCurrent(DebuggerWindow is not None)
    self.analyzeWorld(me, world, self.data)
def create_data():
    training_files = _get_training_files()
    series_training_files = _get_series_of_files(training_files)
    for series in series_training_files:
        data = Data(False, False)
        data.training = TrainingSet(False, False, *series)
        yield data
def __init__(self, path_rendered, dpkgRoot, dpkg, **kwargs):
    Ccoder.__init__(self, dpkgRoot, dpkg, **kwargs)
    Data.__init__(self, path_rendered, dpkgRoot, dpkg, **kwargs)
    self.path_rendered = os.path.abspath(unicode(path_rendered, 'utf8'))
    self.env = Environment(loader=FileSystemLoader(os.path.join(self.path_rendered, 'C', 'templates')))
    self.env.filters.update({
        'is_prior': lambda x: ('data' in x) and isinstance(x['data'], dict)
                              and ('data' in x['data'])
                              and ('distribution' in x['data']['data'])
    })
def main():
    d = Data(computeSamples=False)
    trainIndices, testIndices = d.splitData()
    algo = Algorithm(d.getTrainSample(), d.getTestSample())
    # algo.computeRegressionInput()
    # algo.predictXGboost()
    algo.predictNN()
    # algo.predict()
    algo.writeSubmission()
    print(algo.evalPrecision())
def read(self, string, format):
    try:
        mol = pybel.readstring(format, string)
    except ValueError:
        raise FormatUnsupported("Can't recognize the format '%s' supplied" % format)
    atoms = [(atom.atomicnum, atom.coords) for atom in mol.atoms]
    data = Data()
    data.build_molecule(atomlist=atoms, multiplicity=mol.spin, charge=mol.charge)
    return data
class PartialScore:

    def __init__(self, coeff, query, data_handler, ids):
        logger.info("Initializing Score object")
        self.data = Data(data_handler)
        self.data = self.data.get_clean_datab(query)
        self.coeff = coeff
        self.requested_ids = ids
        self.score = {}
        self.score_details = {}
        self.continuous_variables = ["PUISSANCE_PERSONNE", "TAUX_DE_TRANSFORMATION",
                                     "PRIX_PERSONNE", "PRIX_METRE", "PRIX_PUISSANCE",
                                     "DELAIS_MOYEN_REPONSE_MESSAGES"]
        self.estimate = PartialEstimation()
        logger.info("Score object initialized")

    def __get_score(self):
        logger.info("Starting score computation")
        row_iterator = self.data.iterrows()
        count = 0  # iteration counter for lazy people
        for i, row in row_iterator:
            count += 1
            thread = ComputeZscore(self.estimate, row, i, self.coeff)
            thread.start()
            dic, ind = thread.join()
            self.score[ind] = dic
        logger.info("Computed %i scores" % count)
        return self.score

    def print_for_flo(self):
        self.__get_score()
        list_to_print = []
        for i in self.requested_ids:
            if int(i) in self.score:
                list_to_print.append(str(self.score[int(i)]["SCORE"]))
        print ', '.join(list_to_print)
def _run(self):
    data = Data()
    data.training = self.training
    system = System(data)
    system.use_best_feature_set()
    system.create_features()
    system.train()
    system.eval(quiet=True)
    self.result_event_event = system.evaluation_accuracy_event_event
    self.result_event_timex = system.evaluation_accuracy_event_timex
def __init__(self, debug=True):
    """Start Game"""
    data = Data(debug)  # Load Data and View
    # Initialize frames
    data.timer = QTimer()
    QObject.connect(data.timer, SIGNAL("timeout()"), self.frame)
    data.timer.start(1000)
    data.view.widMain.show()
    # Start mainloop
    exit(data.view.app.exec_())
    # Mainloop is finished. Close Window
def MLP():
    # Training Data
    data = Data('../train.csv.zip', 'train.csv', 5, 10, 10, 10, 10, 10, 10, 10, 2500)
    outputs = data.outputs()
    inputs = data.inputs()
    training_data = zip(inputs, outputs)

    # Test Data
    data_test = Data('../test.csv.zip', 'test.csv', 5, 10, 10, 10, 10, 10, 10, 10, 250)
    # outputs_test = data_test.outputs()
    inputs_test = data_test.inputs()
    test_data = inputs_test

    # Training Model
    net = Network([90, 500, 3388, 2])
    net.SGD(training_data, 1, 200, 0.1, test_data=test_data)
def program():
    '''Main program for Assignment 10.'''
    plt.close('all')
    filename = raw_input('Please enter the filename of the restaurant_grade:\n')
    print '\n '
    print "Processing data, please wait patiently...\n"
    raw_data = Data(filename)  # create data instance
    df = raw_data.clean()      # use class method to process data into a cleaner dataframe
    print '\n '
    all_restaurant_sum(df)     # print out answer to Q4
    sum_by_boro(df)            # print out answer to Q4
    graphs(df)                 # generate plots for Q5
    print "Please check the saved PDF files for the corresponding graphs."
def get_data():
    response = {}
    try:
        dataobj = Data()
        outlist = dataobj.get_data()
        out_list = {}
        out_list['data'] = outlist
        return json.dumps(out_list)
    except Exception as e:
        response['success'] = False
        response['data'] = []
        response['message'] = str(e)  # stringify the exception so the response is JSON-serializable
        return_obj = jsonify(response)
        return_obj.status_code = 500
        return return_obj
def __str__(self, indent=''):
    """ This function is used for printing the class.

        Assumptions:
        N/A

        Source:
        N/A

        Inputs:
        N/A

        Outputs:
        N/A

        Properties Used:
        N/A
    """
    try:
        args = self._diff.__str__(indent)
        args += indent + '_base : ' + self._base.__repr__() + '\n'
        args += indent + '  tag : ' + self._base.tag + '\n'
        return args
    except AttributeError:
        return Data.__str__(self, indent)
def __init__(self, client):
    AppResponse.__init__(self, client)
    self.dependency(js='jquery2')
    self.dependency(css='fontawesome')
    self.__idx = int(client.arg("idx"))
    self.data = Data()
    self.standings = Standings(self.data)
def __init__(self, client):
    AppResponse.__init__(self, client)
    self.dependency(js='jquery')
    self.dependency(js='/apps/rbkweb/MatchBetEditor.js')
    self.dependency(css='fontawesome')
    self.__idx = int(client.arg("idx"))
    self.data = Data()
def __init__(self, nomboite, projet):
    self.__nomboite = nomboite
    self.__listGene = list()
    self.__listCond = list()
    self.__dicoWell = dict()
    self.__projet = projet
    self.__data = Data(self.__projet)
    print self.__run()
def update_prices(self):
    self.prices_given = Data.getInstance().get_prices_dict()
    self.price = self.prices_given[self.type] * self.price_qualifier[self.size]
    for keys, values in self.toppings.items():
        if keys not in self.pizza_to_toppings[self.type]:
            # Topping is not included with this pizza type: charge for all of it.
            self.price += self.prices_given[keys] * self.toppings[keys]
        elif self.toppings[keys] > self.pizza_to_toppings[self.type][keys]:
            # Charge only for the extras beyond what the pizza type includes.
            self.price += self.prices_given[keys] * (
                self.toppings[keys] - self.pizza_to_toppings[self.type][keys])
def new_user():
    data = Data()
    if request.method == "POST":
        username = request.json.get('username')
        password = request.json.get('password')
        if username is None or password is None:
            abort(400)  # missing arguments
        if data.check_user(username):
            abort(400)  # existing user
        username, id = data.create_user(username, password)
        return jsonify({'username': username}), 201, {
            'Location': url_for('get_user', id=id, _external=True)
        }
    elif request.method == "GET":
        return data.get_users()
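# A minimal client sketch for the route above, assuming it is mounted at
# /users on a local Flask server (the URL, port, and credentials here are
# illustrative assumptions, not taken from the original code).
import requests

resp = requests.post("http://localhost:5000/users",
                     json={"username": "alice", "password": "s3cret"})
print(resp.status_code)          # 201 on success, 400 on missing/duplicate user
print(resp.json())               # {'username': 'alice'}
print(resp.headers["Location"])  # URL of the newly created user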
def train(self):
    init_data = Data(self.pos_path, self.neg_path)
    positive_pairs, negative_pairs = init_data.split_in_pairs()

    init_pos = InitMatrices(positive_pairs)
    trans_matrix_pos = init_pos.compute_transitions()
    emission_matrix_pos = init_pos.compute_emissions()

    init_neg = InitMatrices(negative_pairs)
    trans_matrix_neg = init_neg.compute_transitions()
    emission_matrix_neg = init_neg.compute_emissions()

    self.model_pos = HMMConstruct(trans_matrix_pos, emission_matrix_pos, self.pos_path)
    self.model_pos.build_eval_HMM()
    self.model_neg = HMMConstruct(trans_matrix_neg, emission_matrix_neg, self.neg_path)
    self.model_neg.build_eval_HMM()
def reduce(self, reduction_f, initial_accum):
    # Reduce file by file
    accum = initial_accum
    for x in self.file_iterator():
        blocks = [
            Data.expand_array_in_blocks(element, self.block_length, self.offsets[i])
            for i, element in enumerate(x)
        ]
        accum = reduction_f(accum, *blocks)
    return accum
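# A usage sketch for the fold above: reduction_f receives the running
# accumulator plus one expanded block per element of each file. The
# `dataset` object here is an assumption for illustration only.
total_rows = dataset.reduce(
    lambda acc, *blocks: acc + sum(len(b) for b in blocks),  # count rows across blocks
    0)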
def InputOrCorrectMark():
    while True:
        MarkNum = input('Enter the assignment number to record grades for: ')
        rs = Data.readFile('Assignment' + str(MarkNum))
        print('All student grades for this assignment' + '\n')
        print(rs)
        while True:
            student = input('Enter the student ID to record a grade for: ')
            Mark = input('Enter the grade: ')
            oncemark = ['StudentID' + str(student), Mark]
            for i in rs:
                if i[0] == oncemark[0]:
                    i[1] = oncemark[1]
            Data.rewriteFile('Assignment' + str(MarkNum), rs)
            key = input('Continue entering grades? (1 to continue, 0 to go back): ')
            if key == '0':
                break
        key = input('Enter grades for another assignment? (1 to continue, 0 to exit): ')
        if key == '0':
            break
def postAnalysisCalculations(self):
    """
    Makes the calculations and sets statistics following a trading simulation.

    @rtype: None
    """
    data = Data.getInstance()
    self.positionSize = self.position * data.stockData[len(data.stockData) - 1][1]
    self.cash = self.cash - self.commissionTotal
    self.PL = self.cash - self.cashInitial + self.positionSize
def test_018(self):
    tree = DecisionTreeRegressor()
    tree.target_class = 'V1'
    file = open("data/bank-marketing.arff")
    data = Data(file)
    # data.summary()
    # node = Node(data=data)
    # split = tree.find_best_split(node)
    # print(split)
    file.close()
def average():
    """
    Runs Web Service

    :param: time: user inputted as json dictionary
    :param: voltage: user inputted as json dictionary
    :param: averaging_period: user inputted as json dictionary
    :rtype: json dictionary output of time_interval, average_heart_rate,
        tachycardia_annotations, brachycardia_annotations
    """
    hr = np.array([])
    brachy_output = []
    tachy_output = []
    j_dict = request.json
    try:
        j_dict = json.dumps(j_dict)
        j_dict = json.loads(j_dict)  # load is for a file, loads is for a string
    except ValueError:
        return send_error("Input is not JSON dictionary", 600)
    t = np.array(j_dict['time'])
    v = np.array(j_dict['voltage'])
    avg_period = np.array(j_dict['averaging_period'])
    try:
        data_checker = Data(t, v)
        # 'and' (not the bitwise '&') is needed so both results are combined
        # as booleans rather than parsed as a chained comparison.
        if data_checker.value_range_result is True \
                and data_checker.data_type_result is True:
            hr = np.column_stack((t, v))
    except ValueError:
        pass
    peak_data = Processing()
    peak_data.ecg_peakdetect(hr)
    peak_times = peak_data.t
    hr_data = Vitals(peak_times, hr[:, 0])
    avg_hr_array = hr_data.avg_hr_array
    try:
        avg_hr_diagnosis = Diagnosis(avg_hr_array)
        brachy_output = avg_hr_diagnosis.brachy_result
        tachy_output = avg_hr_diagnosis.tachy_result
    except ValueError as Inst:
        print(Inst.args)
        send_error(Inst.args, 400)
    avg_period_dict = {"averaging_period": avg_period.tolist()}
    time_dict = {"time_interval": t.tolist()}
    avg_hr_dict = {"average_heart_rate": avg_hr_array}
    tachy_dict = {"tachycardia_annotations": tachy_output}
    brachy_dict = {"brachycardia_annotations": brachy_output}
    average_content = jsonify(
        [avg_period_dict, time_dict, avg_hr_dict, tachy_dict, brachy_dict])
    return average_content
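# A client-side sketch for the endpoint above. The route path, host, and
# port are assumptions; only the JSON field names come from the handler
# itself, and the sample signal values are invented.
import requests

payload = {
    "time": [0.0, 0.01, 0.02, 0.03],
    "voltage": [0.1, 0.9, 0.2, 0.1],
    "averaging_period": 60,
}
resp = requests.post("http://localhost:5000/average", json=payload)
print(resp.json())  # list of dicts: averaging_period, time_interval, average_heart_rate, ...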
def trainTriplet(self, x, y, epochs, batchSize=10):  # change for 3 inputs/outputs
    # Train the network on triplets (anchor, positive, negative).
    data = Data(x, y)
    for i in range(epochs):
        data.assemple_in_triplets()
        numBatches = len(data.list_of_triplets) // batchSize
        numSamples = batchSize * numBatches
        for j in range(0, numSamples, batchSize):
            X = data.get_nextBatch(batchSize)
            x0 = X[:, 0, :, :]
            xPlus = X[:, 1, :, :]
            xMinus = X[:, 2, :, :]
            input2 = x[j + batchSize:j + 2 * batchSize]  # unused leftover
            _, trainingLoss = self.sess.run(
                [self.optimizer, self.loss],
                feed_dict={self.tf_input0: x0,
                           self.tf_inputPlus: xPlus,
                           self.tf_inputMinus: xMinus})
            # print(trainingLoss)
        print('iteration %d: train loss %.3f' % (i, trainingLoss))
class VideoBrowser:

    def __init__(self):
        self.config = ConfigManager(os.environ['HOME'] + '/.VideoBrowser/VideoBrowser.conf')
        self.data = Data(self.config.get_value('FileSystem', 'moviedir'),
                         self.config.get_value('FileSystem', 'cachedir'))
        self.files = self.data.get_files("avi")
        self.gui = Gui(self.files)

    def main(self):
        gtk.main()
        return
def parse_dict(json_data):
    """ From a dictionary, build and return a list of Data objects. """
    parsed_dataset = []
    for kr_data, r_data in json_data.items():
        if isinstance(r_data, dict):
            parsed_dataset.append(
                Data(kr_data, r_data['name'], r_data['level'].lower(),
                     r_data['priority'].lower()))
    return parsed_dataset
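# A usage sketch for parse_dict. The keys and field values are invented for
# illustration; the required 'name'/'level'/'priority' fields come from the
# function body above. Non-dict entries are skipped.
sample = {
    "KR-001": {"name": "example", "level": "HIGH", "priority": "Low"},
    "skipped": "non-dict entries are ignored",
}
for item in parse_dict(sample):
    print(item)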
def __init__(self):
    self.state = "Title"
    self.width = 640
    self.height = 480
    self.meta = 25
    self.win = pg.display.set_mode((self.width, self.height))
    self.font = pg.font.Font("simkai.ttf", 25)
    self.gd = None
    self.data = Data()
    self.buttonPic = pg.image.load("./pic/Button.png").convert_alpha()
    self.winPic = pg.image.load("./pic/win.png").convert_alpha()
def onProfileClicked(self):
    """
    Sets the strategy used in the simulator to the strategy with this
    profile's strategyIndex.

    @rtype: None
    """
    analyzer = Analysis.getInstance()
    analyzer.strategy = self.strategyIndex
    data = Data.getInstance()
    data.setDataTimerToReset()
def __init__(self, resultado):
    self.analisadores = []

    self.dataC = Data('C', 'data/C')
    self.analiseC = Analisador(self.dataC, resultado)
    self.analisadores.append(self.analiseC)

    self.dataL = Data('L', 'data/L')
    self.analiseL = Analisador(self.dataL, resultado)
    self.analisadores.append(self.analiseL)

    self.dataV = Data('V', 'data/V')
    self.analiseV = Analisador(self.dataV, resultado)
    self.analisadores.append(self.analiseV)

    self.dataVH = Data('VH', 'data/VH')
    self.analiseVH = Analisador(self.dataVH, resultado, gatilho='V', criterio=0.8)
    self.analisadores.append(self.analiseVH)

    self.dataI = Data('I', 'data/I')
    self.analiseI = Analisador(self.dataI, resultado)
    self.analisadores.append(self.analiseI)

    self.dataA = Data('A', 'data/A')
    self.analiseA = Analisador(self.dataA, resultado, 0.87)
    self.analisadores.append(self.analiseA)

    self.dataIX = Data('IX', 'data/IX')
    self.analiseIX = Analisador(self.dataIX, resultado, gatilho='I')
    self.analisadores.append(self.analiseIX)

    self.dataMaoAberta = Data('MaoAberta', 'data/MAO_ABERTA')
    self.analiseMaoAberta = Analisador(self.dataMaoAberta, resultado, criterio=0.8)
    self.analisadores.append(self.analiseMaoAberta)

    self.dataMaoAbertaLado = Data('MaoAbertaLado', 'data/MAO_ABERTA_LADO')
    self.analiseMaoAbertaLado = Analisador(self.dataMaoAbertaLado, resultado,
                                           gatilho='MaoAberta', criterio=0.8)
    self.analisadores.append(self.analiseMaoAbertaLado)
class Maincontrol(object):

    def __init__(self):
        self.builder = gtk.Builder()
        self.builder.add_from_file("Form_Main.glade")
        self.window = self.builder.get_object("form")
        self.builder.connect_signals(self)
        self.window.show_all()

    def config(self, dominio, asunto):
        self.dominio = dominio
        self.asunto = asunto

    def click_toolbtnConfiguracion(self, widget, data=None):
        wConfiguracion = DConfiguracion()
        saveW = wConfiguracion.show()
        # if wConfiguracion.window.hide_on_delete():
        if True:
            print "d"
        else:
            print "c"
        print "xD"

    def click_toolbtnRecuperar(self, widget, data=None):
        wRecuperar = DRecuperar()
        wRecuperar.show()

    def click_toolbtnMensaje(self, widget, data=None):
        wMensaje = DMensaje()
        wMensaje.show()

    def click_toolbtnEnviar(self, widget, data=None):
        self.datos = Data('mails/mail.txt')
        self.datos.file_to_open()
        texto = self.datos.read_to_file()
        texto = texto.replace("\n", ",")
        sMail = Mail("*****@*****.**", "[email protected]," + texto,
                     "Hola mundo de leo", "Si hola mundo de leo.")
        sMail.connect_to_mail("*****@*****.**", "*****@*****.**")
        sMail.send_to_mail()
def fitShot(shotNum, specFlag="tsc", numProcs=None, burstLen=0):
    """This function fits an entire shot. This version will use up to the
    number of detected processors on the machine if the multiprocessing
    module is installed. The multiprocessing module is a standard part of
    python 2.6, and has been backported to 2.5:

        http://code.google.com/p/python-multiprocessing/

    Parameters:
    shotNum  -- The shot number to fit. For example 1070814040.
    numProcs -- The number of processes to create. Defaults to the number
                of CPUs in the machine.
    burstLen -- For culling of extra Fast Thomson laser diode pulses. The
                default of 0 acts like the old system, keeping all pulses.
                Any other value n will cause the laser diode to skip pulses
                ~200 us after n pulses before looking for the next burst.
    """
    from globals import init_globals
    glob = init_globals()
    Te_MIN = glob[0]
    Te_MAX = glob[1]
    NE_STEPS = glob[2]
    TE_STEPS = glob[3]
    ACQIRIS_MIN = glob[4]
    ACQIRIS_MAX = glob[5]
    STRUCK_MIN = glob[6]
    STRUCK_MAX = glob[7]
    LASER_LAM = glob[8]
    LOGBOOK_TRUE = glob[9]
    LOGBOOK_UNSURE = glob[10]

    # Load data
    from Data import Data
    data = Data()
    try:
        data.loadData(shotNum)
    except Exception, ex:
        print "Failed to fit shot:", ex
        return None
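# Usage sketch for fitShot, using the example shot number given in its
# docstring; the numProcs value shown is an illustrative assumption (it
# defaults to the machine's CPU count).
fitShot(1070814040, specFlag="tsc", numProcs=4, burstLen=0)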
def get_translations(start=0, end=1, type='mt'):
    from Data import Data
    dataset = Data().get_data()
    out = []
    if type == 'mt':
        for a in range(start, end):
            out.append(dataset[a][1])  # column used for type 'mt'
        return out
    else:
        for a in range(start, end):
            out.append(dataset[a][2])  # column used for any other type
        return out
def __defaults__(self):
    """ A stub for all classes that come later

        Assumptions:
        N/A

        Source:
        N/A

        Inputs:
        N/A

        Outputs:
        N/A

        Properties Used:
        N/A
    """
    self.tag = 'config'
    self._base = Data()
    self._diff = Data()
def updateChart(self):
    """
    Updates the stock's chart based on new input data from singleton classes.

    @rtype: None
    """
    self._data = Data.getInstance()  # was self.data, but the attribute is read as self._data below
    self._tradeSignals = self._data.tradeSignals
    self._highestPrice = self._data.maxHighInData()
    self._lowestPrice = self._data.minLowInData()
    self._pixelDensity = 0.6 * height / (self._highestPrice - self._lowestPrice)
def append(self, val):
    """ Appends the value to the containers

        Assumptions:
        None

        Source:
        N/A

        Inputs:
        self

        Outputs:
        N/A

        Properties Used:
        N/A
    """
    # val = self.check_new_val(val)
    Data.append(self, val)
def getFromDatabase():
    start = time.process_time()
    myclient = pymongo.MongoClient(LOCALHOST)
    mydb = myclient["mydatabase"]
    elements = mydb["elements"].find()
    dataArray = []
    for element in elements:
        item = Data(element['Type'], element['TimeStamp'],
                    element['CoordX'], element['CoordY'])
        dataArray.append(item)
    end = time.process_time()
    return dataArray, '%.3f' % (end - start)
def __init__(self, weight_input_hidden, weight_hidden_output, input_b, hidden_b, d=None):
    # A mutable default like d=Data() is evaluated once and shared across
    # calls; create a fresh instance per call instead. The duplicated
    # trailing _set_input_b/_set_hidden_b calls have been dropped.
    if d is None:
        d = Data()
    d._set_input_b(input_b)
    d._set_hidden_b(hidden_b)
    d._set_weight_input_hidden(weight_input_hidden)
    d._set_weight_hidden_output(weight_hidden_output)
def __init__(self):
    self._wait_for_params()
    self.rate = rospy.Rate(10)
    self.data = Data(rospy.get_param("/orders_list"))
    self.txt_to_orders = TxtToOrders(self.data.separator, self.data.orders)
    self.dict_publisher = rospy.Publisher("~dictionary", Dictionary, queue_size=1)
    self.txt_to_orders_srv = rospy.Service("~txt_to_orders", TxtToOrdersSrv, self.convert)
def _create_forward(self, d=None):
    # Avoid a shared mutable default (d=Data()); create a fresh Data when
    # no instance is passed in.
    if d is None:
        d = Data()
    input_b = d._get_input_b()
    hidden_b = d._get_hidden_b()
    input_xs = d._get_input_xs()
    weight_input_hidden = d._get_weight_input_hidden()
    output_hidden = self._create_layer(input_xs, weight_input_hidden, input_b)
    d._set_output_hidden(output_hidden)
    weight_hidden_output = d._get_weight_hidden_output()
    output_ys = self._create_layer(output_hidden, weight_hidden_output, hidden_b)
    d._set_output_ys(output_ys)
def main(argv):
    # initialize input args
    if len(argv) < 4:
        print('Not enough arguments provided.')
        exit(1)
    input_path = argv[1]
    links_list = [item for item in argv[2].strip().split(', ')]
    for i in range(len(links_list)):
        links_list[i] = links_list[i].replace('_', '')
    num_clusters_required = int(argv[3])

    # load the data
    data = Data(input_path)
    samples = data.create_samples()
    dist_list = calculate_distances(samples)
    for link in links_list:
        agglomer_obj = AC(link, samples)
        agglomer_obj.run(num_clusters_required, dist_list)
        print("")
        print("")
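# Invocation sketch for main() above. The script name and linkage names are
# assumptions; the argv format follows the parsing logic: argv[2] is split
# on ', ' and underscores are stripped from each linkage name.
#
#     python clustering.py input.csv "single_link, complete_link" 3
#
import sys
main(sys.argv)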
def test_PAM_super(self):
    """
    Test that inheritance is working properly.
    Test that euclidean distance works for both single and dictionary returns.
    """
    data = Data('abalone', pd.read_csv(r'data/abalone.data', header=None), 8)  # load data
    df = data.df.sample(n=10)  # minimal data frame
    data.split_data(data_frame=df)  # sets test and train data
    pam = PAM(k_val=2, data_instance=data)  # create PAM instance to check super
    pam_train_data = pam.train_df  # train_df is an instance from parent class
    self.assertTrue(pam_train_data.equals(data.train_df), "Same data")
    self.assertFalse(pam_train_data.equals(data.test_df))
    row_c, row_q = np.split(pam_train_data, 2)  # split the same data in half
    _, row_comp = next(row_c.copy().iterrows())  # get a row
    _, row_query = next(row_q.copy().iterrows())  # get a row
    dict_dist = pam.get_euclidean_distance_dict(row_query, row_c)
    single_distance = pam.get_euclidean_distance(row_query, row_comp)  # get distance
    self.assertTrue(isinstance(single_distance, float))  # check that it returns a float
    self.assertTrue(isinstance(dict_dist, dict))  # check that it is a dictionary
def getCoordinates(data):
    # data layout: data[0] is the point count, followed by x, y pairs.
    pointsN = int(data[0])
    minSearch = minPosition()
    coordinates = []
    for i in xrange(pointsN):
        x = data[2 * i + 1]
        y = data[2 * i + 2]
        coordinates.append((x, y))
        minSearch.update(x, y, i)
    return Data(coordinates, minSearch.min_pos, pointsN)
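# Usage sketch for getCoordinates with the flat [count, x1, y1, x2, y2, ...]
# layout noted above; the sample values are invented, and minPosition and
# Data come from the original module.
result = getCoordinates([3, 0.0, 2.0, 1.5, -1.0, 4.0, 0.5])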
def __init__(self, vk, peer):
    self.state = State()
    self.rooms = Data.getRooms()
    self.locations = Data.getLocations()
    self.peer = str(peer)
    self.vk = vk
    self.duty: Duty = None
    self.new_duty: Duty = None
    self.old_duty: Duty = None
    # Remind Duty
    self.CURRENT_LOCATION = ''
    self.CURRENT_COMMENT = ''
    # Update Duty
    self.CURRENT_DUTY = []
    self.CURRENT_ROOMS = []
    self.CURRENT_LOCS = []
def single_user(num):
    response_data = requests.get(f'{user_endpoint}/{num}')
    try:
        singleUserData = Data.accessParams(response_data.json()['data'])
        for i, j in response_data.json()['data'].items():
            if not j:  # flag empty field values (checking the key would never trigger)
                return f"Bug with {i}: {j}"
        return (f'GET: {num}. ID = {singleUserData[0]}. '
                f'Status code - {response_data.status_code}, '
                f'first_name: {singleUserData[2]}, last_name: {singleUserData[3]}')
    except Exception:
        return f'GET: {num}. No user. Status code - {response_data.status_code}, Payload - {response_data.json()}'
def run_kmeans():
    data = Data(FILENAME)
    k = 4
    r = 10
    kmeans = Kmeans()
    kmeans.train(data.x23.T, k, 20)
    colors = ["green", "black", "red", "yellow"]
    for i, cluster in enumerate(kmeans.best_clusters):
        plt.scatter(data.x23[i][0], data.x23[i][1], color=colors[int(cluster)])
    # print kmeans.best_clusters
    plt.savefig("kmeans")
    plt.clf()
def _getWeightedUpdraft(self):
    dataframe = self.simulationCompleteData
    dataframe["wposWeighted"] = numpy.zeros(numpy.shape(dataframe["wpos"]))
    print(f"{self.name} Start calculation of weighted updrafts")
    t1 = time.time()
    for emul in self.simulationCollection:
        filename = self.simulationCollection[emul].getNCDatasetFileName()
        ncData = xarray.open_dataset(filename)

        # Slice the dataset to the analysis window [timeStart, timeEnd] (hours).
        timeStartInd = Data.getClosestIndex(ncData.time.values, self.timeStart * 3600.)
        timeEndInd = Data.getClosestIndex(ncData.time.values, self.timeEnd * 3600.) + 1
        ncDataSliced = ncData.isel(time=slice(timeStartInd, timeEndInd))

        cloudMaskHalf = self.__getCloudMask(
            ncDataSliced["l"].values, self.__maskCloudColumnUpToBottomHalfOfCloud)

        wPosValuesHalfMaskedNANIncluded = ncDataSliced["w"].where(
            ncDataSliced["w"] > 0.).values[cloudMaskHalf]
        wPosValuesHalfMasked = wPosValuesHalfMaskedNANIncluded[
            numpy.logical_not(numpy.isnan(wPosValuesHalfMaskedNANIncluded))]

        # Velocity-weighted mean of the positive updrafts: sum(w^2) / sum(w).
        weighted = numpy.sum(numpy.power(wPosValuesHalfMasked, 2)) / numpy.sum(wPosValuesHalfMasked)

        dataframe.loc[dataframe.index == emul, "wposWeighted"] = weighted
    t2 = time.time()
    print(f"{self.name} Time to calculate updrafts {t2-t1:.1f}")
def test(self):
    # Initial observation
    env_info = self.env.reset()
    state = env_info

    # Data
    self.data = Data(1, 100)

    # Episodes done
    n_done = 0

    # Test loop
    while n_done <= self.n_episodes:
        # Action of agent
        action, value = self.act(state)

        # Send the action to the environment
        next_state, reward, done, info = self.env.step(action)

        # Update t_step
        self.t_step += 1

        # Update n_done
        if done:
            n_done += 1

        # Next state
        state = next_state

        # Update the score
        reward_ = np.expand_dims(reward, axis=0)
        value_ = value.unsqueeze(0)
        done_ = np.expand_dims(done, axis=0)
        self.data.update_score(reward_, value_, done_, self.t_step)

        # Summary
        if done:
            self.data.summary(self.t_step)
def training(self):
    def _loss(y_true, y_pred):
        print('y_pred.shape:', y_pred.shape)
        print('y_true.shape: ', y_true.shape)
        labels = tf.cast(tf.squeeze(y_true), tf.int64)
        return tf.reduce_mean(
            input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=y_pred, labels=labels))

    print('Executing eagerly? ', tf.executing_eagerly())
    self.build_graph()
    learning_rate = ExponentialDecay(initial_learning_rate=LEARNING_RATE,
                                     decay_steps=2000,
                                     decay_rate=0.97,
                                     staircase=True)
    optimizer = optimizers.Adam(learning_rate=learning_rate)
    # self.model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
    self.model.compile(loss=_loss, optimizer=optimizer, metrics=["accuracy"])
    self.model.run_eagerly = True
    self.model.summary()
    # keras.utils.plot_model(self.model, show_shapes=True)

    checkpoint_callback = callbacks.ModelCheckpoint(MODEL_NAME + ".h5", save_freq='epoch')
    time_callback = TimeHistory()
    run_logdir = self.get_run_logdir()
    tensorboard_callback = callbacks.TensorBoard(run_logdir)

    data = Data()
    train_dataset = data.prepare_data_for_training(Data.DATA_TRAINING, batch_size)
    history = self.model.fit(train_dataset,
                             steps_per_epoch=STEPS_PER_EPOCH,
                             epochs=EPOCHS,
                             callbacks=[time_callback])
    # score = self.model.evaluate(X_test, y_test)
    self.save_model()
def __init__(self, base=None):
    """ Initializes the new Diffed_Data() class through a deepcopy

        Assumptions:
        N/A

        Source:
        N/A

        Inputs:
        N/A

        Outputs:
        N/A

        Properties Used:
        N/A
    """
    if base is None:
        base = Data()
    self._base = base
    this = deepcopy(base)  # deepcopy is needed here to build configs - Feb 2016, T. MacDonald
    Data.__init__(self, this)
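# Construction sketch for the Diffed_Data initializer above: with no
# argument it wraps a fresh Data(); passing a base stores it and fills the
# instance from a deep copy. Setting 'tag' mirrors the __defaults__ stub
# shown earlier; the attribute value is illustrative.
base = Data()
base.tag = 'config'
diffed = Diffed_Data(base)  # diffed._base is base; contents are deep-copied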
def __init__(self):
    self.builder = gtk.Builder()
    self.builder.add_from_file("FormRecuperar.glade")
    self.window = self.builder.get_object("formRecuperar")
    self._lblDominio = self.builder.get_object("txtDominio")
    self.builder.connect_signals(self)
    self.datos = Data("cv.txt")
    self.datos.file_to_open()
    texto = self.datos.read_to_file()
    texto = self.datos.split_to_string(texto, "\n")
    datos = [self.datos.split_to_string(texto[0], ":"),
             self.datos.split_to_string(texto[1], ":")]
    self._lblDominio.set_text(datos[0][1].strip())
def __init__(self, client):
    AppResponse.__init__(self, client)
    self.dependency(js='jquery2')
    self.dependency(css='fontawesome')
    self.__idx = int(client.arg('idx'))
    self.__mode = client.arg('mode')
    assert self.__mode in ['round', 'series', 'form'], 'invalid mode arg'
    self.__part = 0
    if self.__mode == 'series':
        self.__part = int(client.arg('part'))
    self.data = Data()
    self.standings = Standings(self.data)
def fitShot(shotNum, specFlag="tsc", numProcs=None, burstLen=0):
    """This function fits an entire shot. This version will use up to the
    number of detected processors on the machine if the multiprocessing
    module is installed. The multiprocessing module is a standard part of
    python 2.6, and has been backported to 2.5:

        http://code.google.com/p/python-multiprocessing/

    Parameters:
    shotNum  -- The shot number to fit. For example 1070814040.
    numProcs -- The number of processes to create. Defaults to the number
                of CPUs in the machine.
    burstLen -- For culling of extra Fast Thomson laser diode pulses. The
                default of 0 acts like the old system, keeping all pulses.
                Any other value n will cause the laser diode to skip pulses
                ~200 us after n pulses before looking for the next burst.
    """
    data = Data()
    try:
        data.loadData(shotNum)
    except Exception, ex:
        print "Failed to fit shot:", ex
        return None
def custom_demo(use_old_data=True):
    draw = Draw()
    if use_old_data:
        # pickle.load takes a file object, not a path
        with open("custom_data.p", "rb") as f:
            data = pickle.load(f)
    if not use_old_data:
        img = np.concatenate(([1], np.ravel(draw.get_img())))
        label = raw_input("What did you draw? ")
        num_features = np.size(img) - 1  # Subtract 1 for the bias term
        features = [img]
        labels = [label]
        data = Data(features=features, labels=labels, theta=np.zeros((1, 785)),
                    num_features=num_features, num_labels=1, num_examples=1,
                    alpha=0.01, epsilon=0.1, label_set=[label])
        _gradient_ascent(data)
    try:
        while True:
            img = np.concatenate(([1], np.ravel(draw.get_img())))
            prediction = _predict(img, data)
            print 'I think you drew a', data.label_set[prediction]
            label = raw_input("What did you draw? ")
            data.features.append(img)
            data.labels.append(label)
            data.num_examples += 1
            if label not in data.label_set:
                data.label_set.append(label)
                data.num_labels += 1
                # New label means we must add a row to theta
                new_row = np.zeros((1, data.num_features + 1))
                data.theta = np.vstack((data.theta, new_row))
            _gradient_ascent(data)
    except KeyboardInterrupt:
        save = raw_input("Save data? ").lower()
        if save == 'y' or save == 'yes':
            # pickle.dump also takes a file object
            with open("custom_data.p", "wb") as f:
                pickle.dump(data, f)
def _run_systems(self):
    for k in range(1, self.max_len + 1):
        features = list(set(self._feature_series(k, self.features_event_event) +
                            self._feature_series(k, self.features_event_timex)))
        print features
        data = Data()
        data.training = TrainingSet(False, False, "data/training/TBAQ-cleaned/TimeBank/")
        system = System(data, features)
        system.create_features()
        system.cross_validation()

        # Record the event-event accuracy, skipping duplicates where the
        # feature series did not change from the previous k.
        now = list(set(self._feature_series(k, self.features_event_event)))
        if k > 1:
            prev = list(set(self._feature_series(k - 1, self.features_event_event)))
            if now != prev:
                self.accuracies_event_event.append(system.crossval_accuracy_event_event)
                print system.crossval_accuracy_event_event
        else:
            self.accuracies_event_event.append(system.crossval_accuracy_event_event)
            print system.crossval_accuracy_event_event

        # Same for the event-timex accuracy.
        now = list(set(self._feature_series(k, self.features_event_timex)))
        if k > 1:
            prev = list(set(self._feature_series(k - 1, self.features_event_timex)))
            if now != prev:
                self.accuracies_event_timex.append(system.crossval_accuracy_event_timex)
                print system.crossval_accuracy_event_timex
        else:
            self.accuracies_event_timex.append(system.crossval_accuracy_event_timex)
            print system.crossval_accuracy_event_timex
        print